import copy
import json
import random
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64
import requests

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

# Explicit public API: the *_callback entry points (list / article / etl
# triples, one group per crawled government site) exported via
# ``from ... import *``.
__all__ = [
    "policy_fgwgxzflist1_callback",
    "policy_fgwgxzflist2_callback",
    "policy_fgwgxzfarticle_callback",
    "policy_fgwgxzfarticle_etl_callback",
    "policy_gxtgxzflist_callback",
    "policy_gxtgxzfarticle_callback",
    "policy_gxtgxzfarticle_etl_callback",
    "policy_kjtgxzflist_callback",
    "policy_kjtgxzfarticle_callback",
    "policy_kjtgxzfarticle_etl_callback",
    "policy_jytgxzflist_callback",
    "policy_jytgxzfarticle_callback",
    "policy_jytgxzfarticle_etl_callback",
    "policy_mztgxzflist_callback",
    "policy_mztgxzfarticle_callback",
    "policy_mztgxzfarticle_etl_callback",
    "policy_cztgxzflist_callback",
    "policy_cztgxzflist1_callback",
    "policy_cztgxzfarticle_callback",
    "policy_cztgxzfarticle_etl_callback",
    "policy_rstgxzflist_callback",
    "policy_rstgxzfarticle_callback",
    "policy_rstgxzfarticle_etl_callback",
    "policy_nynctgxzflist_callback",
    "policy_nynctgxzflist1_callback",
    "policy_nynctgxzfarticle_callback",
    "policy_nynctgxzfarticle_etl_callback",
    "policy_zjtgxzflist_callback",
    "policy_zjtgxzfarticle_callback",
    "policy_zjtgxzfarticle_etl_callback",
    "policy_wsjkwgxzflist_callback",
    "policy_wsjkwgxzfarticle_callback",
    "policy_wsjkwgxzfarticle_etl_callback",
    "policy_nanninglist_callback",
    "policy_nanninglist1_callback",
    "policy_nanningarticle_callback",
    "policy_nanningarticle_etl_callback",
    "policy_liuzhoulist_callback",
    "policy_liuzhoulist1_callback",
    "policy_liuzhouarticle_callback",
    "policy_liuzhouarticle_etl_callback",
    "policy_guilinlist_callback",
    "policy_guilinlist1_callback",
    "policy_guilinarticle_callback",
    "policy_guilinarticle_etl_callback",
    "policy_wuzhoulist_callback",
    "policy_wuzhoulist1_callback",
    "policy_wuzhouarticle_callback",
    "policy_wuzhouarticle_etl_callback",
    "policy_beihailist_callback",
    "policy_beihaiarticle_callback",
    "policy_beihaiarticle_etl_callback",
    "policy_chongzuolist_callback",
    "policy_chongzuolist1_callback",
    "policy_chongzuoarticle_callback",
    "policy_chongzuoarticle_etl_callback",
    "policy_laibinlist_callback",
    "policy_laibinarticle_callback",
    "policy_laibinarticle_etl_callback",
    "policy_gxhzlist_callback",
    "policy_gxhzarticle_callback",
    "policy_gxhzarticle_etl_callback",
    "policy_yulinlist_callback",
    "policy_yulinarticle_callback",
    "policy_yulinarticle_etl_callback",
    "policy_baiselist_callback",
    "policy_baiselist1_callback",
    "policy_baisearticle_callback",
    "policy_baisearticle_etl_callback",
    "policy_hechilist_callback",
    "policy_hechilist1_callback",
    "policy_hechiarticle_callback",
    "policy_hechiarticle_etl_callback",
    "policy_qinzhoulist_callback",
    "policy_qinzhouarticle_callback",
    "policy_qinzhouarticle_etl_callback",
    "policy_fcgslist_callback",
    "policy_fcgsarticle_callback",
    "policy_fcgsarticle_etl_callback",
    "policy_gxgglist_callback",
    "policy_gxgglist1_callback",
    "policy_gxggarticle_callback",
    "policy_gxggarticle_etl_callback",
    "policy_fgwhenanlist_callback",
    "policy_fgwhenanarticle_callback",
    "policy_fgwhenanarticle_etl_callback",
    "policy_gxthenanlist_callback",
    "policy_gxthenanarticle_callback",
    "policy_gxthenanarticle_etl_callback",
    "policy_kjthenanlist_callback",
    "policy_kjthenanarticle_callback",
    "policy_kjthenanarticle_etl_callback",
    "policy_jythenanlist_callback",
    "policy_jythenanarticle_callback",
    "policy_jythenanarticle_etl_callback",
    "policy_mzthenanlist_callback",
    "policy_mzthenanarticle_callback",
    "policy_mzthenanarticle_etl_callback",
    "policy_czthenanlist_callback",
    "policy_czthenanarticle_callback",
    "policy_czthenanarticle_etl_callback",
    "policy_hrsshenanlist_callback",
    "policy_hrsshenanlist1_callback",
    "policy_hrsshenanarticle_callback",
    "policy_hrsshenanarticle_etl_callback",
    "policy_nyncthenanlist_callback",
    "policy_nyncthenanarticle_callback",
    "policy_nyncthenanarticle_etl_callback",
    "policy_hnjshenanlist_callback",
    "policy_hnjshenanarticle_callback",
    "policy_hnjshenanarticle_etl_callback",
    "policy_wsjkwhenanlist_callback",
    "policy_wsjkwhenanarticle_callback",
    "policy_wsjkwhenanarticle_etl_callback",
    "policy_zhengzhoulist_callback",
    "policy_zhengzhouarticle_callback",
    "policy_zhengzhouarticle_etl_callback",
    "policy_kaifenglist_callback",
    "policy_kaifengarticle_callback",
    "policy_kaifengarticle_etl_callback",
    "policy_lylist_callback",
    "policy_lyarticle_callback",
    "policy_lyarticle_etl_callback",
    "policy_pdslist_callback",
    "policy_pdsarticle_callback",
    "policy_pdsarticle_etl_callback",
    "policy_anyanglist_callback",
    "policy_anyangarticle_callback",
    "policy_anyangarticle_etl_callback",
    "policy_hebilist_callback",
    "policy_hebiarticle_callback",
    "policy_hebiarticle_etl_callback",
    "policy_xinxianglist_callback",
    "policy_xinxiangarticle_callback",
    "policy_xinxiangarticle_etl_callback",
    "policy_jiaozuolist_callback",
    "policy_jiaozuoarticle_callback",
    "policy_jiaozuoarticle_etl_callback",
    "policy_puyanglist_callback",
    "policy_puyangarticle_callback",
    "policy_puyangarticle_etl_callback",
    "policy_xuchanglist_callback",
    "policy_xuchangarticle_callback",
    "policy_xuchangarticle1_callback",
    "policy_xuchangarticle_etl_callback",
    "policy_xuchangarticle1_etl_callback",
    "policy_luohelist_callback",
    "policy_luohearticle_callback",
    "policy_luohearticle_etl_callback",
    "policy_smxlist_callback",
    "policy_smxarticle_callback",
    "policy_smxarticle_etl_callback",
    "policy_zhoukoulist_callback",
    "policy_zhoukouarticle_callback",
    "policy_zhoukouarticle_etl_callback",
    "policy_zhumadianlist_callback",
    "policy_zhumadianarticle_callback",
    "policy_zhumadianarticle_etl_callback",
    "policy_nanyanglist_callback",
    "policy_nanyangarticle_callback",
    "policy_nanyangarticle_etl_callback",
    "policy_xinyanglist_callback",
    "policy_xinyangarticle_callback",
    "policy_xinyangarticle_etl_callback",
    "policy_tjbhlist_callback",
    "policy_tjbharticle_callback",
    "policy_tjbharticle_etl_callback",
    "policy_fgwhunanlist1_callback",
    "policy_fgwhunanlist2_callback",
    "policy_fgwhunanarticle_callback",
    "policy_fgwhunanarticle_etl_callback",
    "policy_gxthunanlist1_callback",
    "policy_gxthunanlist2_callback",
    "policy_gxthunanarticle_callback",
    "policy_gxthunanarticle_etl_callback",
    "policy_kjthunanlist_callback",
    "policy_kjthunanarticle_callback",
    "policy_kjthunanarticle_etl_callback",
    "policy_jythunanlist_callback",
    "policy_jythunanarticle_callback",
    "policy_jythunanarticle_etl_callback",
    "policy_mzthunanlist_callback",
    "policy_mzthunanarticle_callback",
    "policy_mzthunanarticle_etl_callback",
    "policy_czthunanlist_callback",
    "policy_czthunanarticle_callback",
    "policy_czthunanarticle_etl_callback",
    "policy_rsthunanlist_callback",
    "policy_rsthunanarticle_callback",
    "policy_rsthunanarticle_etl_callback",
    "policy_agrihunanlist_callback",
    "policy_agrihunanarticle_callback",
    "policy_agrihunanarticle_etl_callback",
    "policy_wjwhunanlist_callback",
    "policy_wjwhunanarticle_callback",
    "policy_wjwhunanarticle_etl_callback",
    "policy_changshalist_callback",
    "policy_changshaarticle_callback",
    "policy_changshaarticle_etl_callback",
    "policy_zhuzhoulist_callback",
    "policy_zhuzhouarticle_callback",
    "policy_zhuzhouarticle_etl_callback",
    "policy_xiangtanlist_callback",
    "policy_xiangtanarticle_callback",
    "policy_xiangtanarticle_etl_callback",
    "policy_hengyanglist_callback",
    "policy_hengyangarticle_callback",
    "policy_hengyangarticle_etl_callback",
    "policy_shaoyanglist_callback",
    "policy_shaoyangarticle_callback",
    "policy_shaoyangarticle_etl_callback",
    "policy_yueyanglist_callback",
    "policy_yueyangarticle_callback",
    "policy_yueyangarticle_etl_callback",
    "policy_changdelist1_callback",
    "policy_changdelist2_callback",
    "policy_changdelist3_callback",
    "policy_changdearticle_callback",
    "policy_changdearticle_etl_callback",
    "policy_zjjlist1_callback",
    "policy_zjjlist2_callback",
    "policy_zjjarticle_callback",
    "policy_zjjarticle_etl_callback",
    "policy_yiyanglist_callback",
    "policy_yiyangarticle_callback",
    "policy_yiyangarticle_etl_callback",
    "policy_czslist_callback",
    "policy_czsarticle_callback",
    "policy_czsarticle_etl_callback",
    "policy_yzcitylist_callback",
    "policy_yzcityarticle_callback",
    "policy_yzcityarticle_etl_callback",
    "policy_huaihualist_callback",
    "policy_huaihuaarticle_callback",
    "policy_huaihuaarticle_etl_callback",
    "policy_hnloudilist1_callback",
    "policy_hnloudilist2_callback",
    "policy_hnloudiarticle_callback",
    "policy_hnloudiarticle_etl_callback",
    "policy_xxzlist_callback",
    "policy_xxzarticle_callback",
    "policy_xxzarticle_etl_callback",
    "policy_nythubeilist1_callback",
    "policy_nythubeilist2_callback",
    "policy_nythubeiarticle_callback",
    "policy_nythubeiarticle_etl_callback",
    "policy_huangshilist_callback",
    "policy_huangshiarticle_callback",
    "policy_huangshiarticle_etl_callback",
    "policy_xiangyanglist1_callback",
    "policy_xiangyanglist2_callback",
    "policy_xiangyangarticle_callback",
    "policy_xiangyangarticle_etl_callback",
    "policy_jingzhoulist1_callback",
    "policy_jingzhoulist2_callback",
    "policy_jingzhouarticle_callback",
    "policy_jingzhouarticle_etl_callback",
    "policy_shiyanlist1_callback",
    "policy_shiyanlist2_callback",
    "policy_shiyanarticle_callback",
    "policy_shiyanarticle_etl_callback",
    "policy_xiaoganlist1_callback",
    "policy_xiaoganlist2_callback",
    "policy_xiaoganarticle_callback",
    "policy_xiaoganarticle_etl_callback",
    "policy_jingmenlist1_callback",
    "policy_jingmenlist2_callback",
    "policy_jingmenarticle_callback",
    "policy_jingmenarticle_etl_callback",
    "policy_hglist_callback",
    "policy_hglist1_callback",
    "policy_hgarticle_callback",
    "policy_hgarticle_etl_callback",
    "policy_xianninglist1_callback",
    "policy_xianninglist2_callback",
    "policy_xianningarticle_callback",
    "policy_xianningarticle_etl_callback",
    "policy_suizhoulist_callback",
    "policy_suizhouarticle_callback",
    "policy_suizhouarticle_etl_callback",
    "policy_enshilist1_callback",
    "policy_enshilist2_callback",
    "policy_enshilist3_callback",
    "policy_enshiarticle_callback",
    "policy_enshiarticle_etl_callback",

    "policy_wuhanlist_callback",
    "policy_wuhanarticle_callback",
    "policy_wuhanarticle_etl_callback",
    "policy_yichanglist_callback",
    "policy_yichangarticle_callback",
    "policy_yichangarticle_etl_callback",
    "policy_yichanglist1_callback",
    "policy_yichangarticle1_callback",
    "policy_yichangarticle1_etl_callback",
    "policy_ezhoulist_callback",
    "policy_ezhoulist1_callback",
    "policy_ezhouarticle_callback",
    "policy_ezhouarticle_etl_callback",
    "policy_zjthunanlist_callback",
    "policy_zjthunanarticle_callback",
    "policy_zjthunanarticle_etl_callback",

    # Hubei provincial-department ETL callbacks (list/article stages are
    # presumably defined elsewhere — TODO confirm).
    "policy_fgwhubeiarticle_etl_callback",
    "policy_jxthubeiarticle_etl_callback",
    "policy_kjthubeiarticle_etl_callback",
    "policy_jythubeiarticle_etl_callback",
    "policy_mzthubeiarticle_etl_callback",
    "policy_czthubeiarticle_etl_callback",
    "policy_rsthubeiarticle_etl_callback",
    "policy_zjthubeiarticle_etl_callback",
    "policy_wjwhubeiarticle_etl_callback",
]


def clean_text(text):
    """Return the portion of *text* after the last ':' / '：', stripped.

    When no separator is present the stripped input is returned unchanged;
    a None input yields ''.
    """
    if text is None:
        return ""
    return re.sub(r".*[:：](.*)", r"\1", text).strip()


def clean_organ(organ):
    """Extract the issuing-organ text after a ':' / '：' label.

    Some pages leave an inline JavaScript fragment in the organ field; when
    an ``if`` marker is present, only the quoted argument of the embedded
    ``write("...")`` call is kept.

    Returns '' when *organ* is None.
    """
    if organ is None:
        return ""
    text = re.sub(r".*[:：](.*)", r"\1", organ)
    if "if" in text:  # leftover inline JS: keep only the write("...") payload
        text = re.sub(r".*write\(\"(.*?)\"\).*", r"\1", text)
    return text.strip()


def clean_pubdate(value):
    """Normalize a date string to an 8-character YYYYMMDD form.

    Non-digit characters are dropped, the remainder is truncated to 8 digits
    and right-padded with '0'. Out-of-range parts are blanked: a month > 12
    zeroes both month and day, a day > 31 zeroes the day.

    Returns '' for falsy input.
    """
    if not value:
        return ''
    value = re.sub(r'\D', '', value)  # keep digits only
    value = value[:8].ljust(8, '0')
    if int(value[4:6]) > 12:  # bogus month -> blank month and day
        value = value[:4] + '0000'
    if int(value[6:]) > 31:  # bogus day -> blank day
        value = value[:6] + '00'
    return value


def cleaned(value):
    """Strip a string, or join-and-strip a list of strings.

    List items are stripped individually and joined with single spaces.
    Falsy input (None, '', []) yields ''.
    """
    if not value:
        return ""
    if isinstance(value, list):
        return ' '.join(part.strip() for part in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be skipped as a non-downloadable link.

    Filters out: overly long URLs, URLs without a path, pseudo-links
    (javascript:, mailto, anchors, data URIs), search-engine / social-media
    links, URLs polluted with full-width punctuation (a sign of text scraped
    mid-sentence), and bare page URLs (.html/.shtml/... endings) that are
    navigation rather than attachments.
    """
    if len(url) > 500:
        return True
    if '/' not in url.replace('//', ''):  # no path component at all
        return True
    junk_markers = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(marker in url for marker in junk_markers):
        return True
    if url.endswith(('/', '.net', '.asp', '.shtml', '/share', '.exe',
                     '.xml', 'pdf}', 'jpg}')):
        return True
    ends = url.split('/')[-1].lower()
    if not ends:
        return True
    if ends.endswith(('.htm', '.shtml', '.jhtml', '.org', 'xhtml', '.phtml',
                      '.cn', '.com', '.html', '.mht', '.html%20')):
        return True
    # Short .jsp names (e.g. "index.jsp") are page URLs, not attachments.
    if '.jsp' in ends and len(ends.split('.', 1)[1]) < 7:
        return True
    return False


def get_file_info(data, res, xpath):
    """Collect attachment and image links found under *xpath* in *res*.

    Builds a list of ``{'url', 'name', 'pub_year', 'keyid'}`` dicts for every
    ``<a href>`` and every ``@src`` under the given xpath, resolving relative
    links against ``data['provider_url']`` and skipping junk links via
    :func:`judge_url`. Duplicate URLs are reported once, in first-seen order.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    file_info = []
    seen = set()  # O(1) dedup; file_info keeps first-seen order
    for tag in res.xpath(f'{xpath}//a'):
        file_href = tag.xpath('@href').extract_first()
        if not (file_href and file_href.strip()):
            continue
        file_href = file_href.strip()
        try:
            file_url = parse.urljoin(base_url, file_href)
        except Exception:  # malformed href: skip it, don't abort the page
            continue
        if judge_url(file_url) or file_url in seen:
            continue
        seen.add(file_url)
        name = ''.join(tag.xpath('.//text()').extract()).strip()
        file_info.append({'url': file_url, 'name': name,
                          'pub_year': pub_year, 'keyid': keyid})
    for img_href in res.xpath(f'{xpath}//*/@src').extract():
        if not img_href.strip():
            continue
        img_href = img_href.strip()
        img_url = parse.urljoin(base_url, img_href)
        if judge_url(img_url) or img_url in seen:
            continue
        seen.add(img_url)
        file_info.append({'url': img_url, 'name': img_href,
                          'pub_year': pub_year, 'keyid': keyid})
    return file_info


def deal_sql_dict(sql_dict):
    """Remove bookkeeping columns from a task-row dict (in place) and return it.

    Raises KeyError if any expected column is missing, matching dict.pop
    without a default.
    """
    for column in ("id", "update_time", "create_time", "null_dicts",
                   "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(column)
    return sql_dict


def checkExist(obj):
    """Return True when *obj* is non-None and has a non-zero length."""
    return obj is not None and len(obj) > 0


def isVaildDate(date):
    """Return True when *date* parses as a valid YYYYMMDD date string.

    (Name spelling kept as-is — callers use "isVaildDate".)
    """
    try:
        time.strptime(date, "%Y%m%d")
        return True
    except (ValueError, TypeError):  # malformed string or non-string input
        return False


def cleanSemicolon(text):
    """Normalize semicolon-separated text.

    Full-width '；' is mapped to ';', whitespace around semicolons and runs
    of repeated semicolons are collapsed, and leading/trailing semicolons
    plus outer whitespace are removed.
    """
    text = text.replace('；', ';')
    for pattern, repl in ((r"\s+;", ";"),   # drop blanks before a ';'
                          (r";\s+", ";"),   # drop blanks after a ';'
                          (r";+", ";"),     # collapse ';;;' runs
                          (r"^;", ""),      # no leading ';'
                          (r";$", "")):     # no trailing ';'
        text = re.sub(pattern, repl, text)
    return text.strip()


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base policy record shared by the *_etl_callback functions.

    *down_date_str* is expected in '%Y%m%d_%H%M%S' form; its first 8
    characters become latest_date while the full string becomes batch.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the companion fulltext record for a policy article.

    The stored filename is derived from *lngid* and the content type is
    always html; address/size fields are left blank for later stages.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


def policy_fgwgxzflist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for fgw.gxzf.gov.cn static .shtml listings.

    On the first page (page_index == 0) it reads the total page count from
    the client-side createPageHTML(...) pager script and enqueues one list
    task per remaining page; on every page it extracts article links,
    titles and dates and enqueues them as next-stage article tasks
    (sub_db_id 99382).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: default total_page so a missing/unparsable pager script no
        # longer leaves the name unbound (NameError in the seeding loop).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.shtml .. index_{n-1}.shtml).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('./a/@href').get("").replace("./", "")
            base_url = r'http://fgw.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://fgw.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid = file name without extension, e.g. "t123" from ".../t123.shtml"
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99382'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for malformed <li>
            # items, which would raise AttributeError here — confirm the pages
            # always carry <a>/<span> text.
            article_json["title"] = li.xpath('./a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_fgwgxzflist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the fgw.gxzf.gov.cn JSON search API.

    The downloaded payload carries a pager ({"pager": {"pageCount": N}}) and
    an article list. On the first page (page_index == 1) the remaining pages
    are enqueued as new list tasks; every page's articles are enqueued as
    next-stage article tasks (sub_db_id 99382).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']
        total_page = data["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed list tasks for pages 2..total_page (this API is 1-based).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # NOTE: the same sql_dict is mutated each iteration; .copy()
                # freezes a per-page snapshot.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = data["list"]
        for item in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("doc_pub_url", "")
            if 'shtml' not in url:
                continue
            # API field f_202251240216 == "0" maps to legal status "有效".
            tmp = item.get("f_202251240216", "")
            legal_status = ""
            if tmp == "0":
                legal_status = "有效"
            # rawid = file name without extension from the article URL.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99382'
            article_json["legal_status"] = legal_status
            article_json["url"] = url
            article_json["title"] = item.get("f_2022512470353")
            article_json["pub_date"] = item.get("save_time").split(" ")[0].replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_fgwgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for fgw.gxzf.gov.cn: returns an empty
    DealModel (article parsing happens in the matching etl callback)."""
    return DealModel()


def policy_fgwgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for fgw.gxzf.gov.cn articles (sub_db_id 99382).

    Parses the downloaded article HTML into a policy_latest record plus a
    policy_fulltext_latest record, and writes any attachment/image links
    found in the body into the task row's other_dicts column.

    Raises:
        Exception: when the fulltext container div is missing, so the task
            fails instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99382"
    product = "FGWGXZF"
    zt_provider = "fgwgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")

    # Prefer the on-page <h1> title; fall back to the list-page title.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)

    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Metadata table: each field sits in a <td> whose <strong> holds the label.
    organ = ''.join(sel.xpath(
        "//strong[contains(string(), '发文单位')]/parent::td[1]//text()"
    ).extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # Names starting with '省' get the '广西' prefix prepended.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath(
        "//strong[contains(string(), '成文日期')]/parent::td[1]//text()"
    ).extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath(
        "//strong[contains(string(), '发文字号')]/parent::td[1]//text()"
    ).extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # BUGFIX: carry a diagnostic message instead of a bare Exception.
        raise Exception(f"fulltext container not found: {article_json['url']}")
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment links (if any) back onto the task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_gxtgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gxt.gxzf.gov.cn static .shtml listings.

    On the first page (page_index == 0) it reads the total page count from
    the client-side createPageHTML(...) pager script and enqueues one list
    task per remaining page; on every page it extracts article links,
    titles and dates and enqueues them as next-stage article tasks
    (sub_db_id 99383).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: default total_page so a missing/unparsable pager script no
        # longer leaves the name unbound (NameError in the seeding loop).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.shtml .. index_{n-1}.shtml).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('./a/@href').get("").replace("./", "")
            base_url = r'http://gxt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://gxt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid = file name without extension, e.g. "t123" from ".../t123.shtml"
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99383'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None for malformed <li>
            # items, which would raise AttributeError here — confirm the pages
            # always carry <a>/<span> text.
            article_json["title"] = li.xpath('./a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_gxtgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for gxt.gxzf.gov.cn: returns an empty
    DealModel (article parsing happens in the matching etl callback)."""
    return DealModel()


def policy_gxtgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxt.gxzf.gov.cn articles (sub_db_id 99383).

    Parses the downloaded article HTML into a policy_latest record plus a
    policy_fulltext_latest record, and writes any attachment/image links
    found in the body into the task row's other_dicts column.

    Raises:
        Exception: when the fulltext container div is missing, so the task
            fails instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99383"
    product = "GXTGXZF"
    zt_provider = "gxtgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")

    # Prefer the on-page <h1> title; fall back to the list-page title.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)

    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Metadata table: each field sits in a <td> whose <strong> holds the label.
    organ = ''.join(sel.xpath(
        "//strong[contains(string(), '发文单位')]/parent::td[1]//text()"
    ).extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # Names starting with '省' get the '广西' prefix prepended.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath(
        "//strong[contains(string(), '成文日期')]/parent::td[1]//text()"
    ).extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath(
        "//strong[contains(string(), '发文字号')]/parent::td[1]//text()"
    ).extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # BUGFIX: carry a diagnostic message instead of a bare Exception.
        raise Exception(f"fulltext container not found: {article_json['url']}")
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment links (if any) back onto the task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_kjtgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a list page of kjt.gxzf.gov.cn (Guangxi Science & Technology Dept.).

    On the first page (page_index == 0) the remaining list pages are scheduled
    via befor_dicts; every article link found on the current page is queued for
    the next (article) stage via next_dicts.

    :param callmodel: framework callback context carrying the downloaded HTML,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the createPageHTML(...) pager script.
        # Default to 1 so total_page is always bound (the original code raised
        # NameError whenever the pager script tag was missing).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (index_1.shtml ...).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('./a/@href').get("").replace("./", "")
            base_url = r'http://kjt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://kjt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                # Skip links that are not site articles.
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99384'
            article_json["url"] = url
            article_json["title"] = li.xpath('./a/font/text()|./a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_kjtgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No parsing needed at the article download stage; return an empty DealModel."""
    return DealModel()


def policy_kjtgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of kjt.gxzf.gov.cn.

    Builds one row for policy_latest (metadata) and one for
    policy_fulltext_latest (HTML fulltext), and stores attachment info back on
    the originating task row via a befor_dicts update.

    :raises Exception: when the fulltext container is missing, so the task
        fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99384"
    product = "KJTGXZF"
    zt_provider = "kjtgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page <h1>; fall back to the title captured at list stage.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only a well-formed YYYYMMDD date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # A bare "省..." organ refers to the Guangxi regional level; qualify it.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # A message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception("fulltext not found for {}".format(data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info or {}, ensure_ascii=False)})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jytgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a list page of jyt.gxzf.gov.cn (Guangxi Education Dept.).

    On the first page (page_index == 0) the remaining list pages are scheduled
    via befor_dicts; every article link found on the current page is queued for
    the next (article) stage via next_dicts.

    :param callmodel: framework callback context carrying the downloaded HTML,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to a single page so total_page is always bound (the original
        # code raised NameError when the createPageHTML pager script was absent).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (index_1.shtml ...).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('./a/@href').get("").replace("./", "")
            base_url = r'http://jyt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://jyt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                # Skip links that are not site articles.
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99385'
            article_json["url"] = url
            article_json["title"] = li.xpath('./a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jytgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No parsing needed at the article download stage; return an empty DealModel."""
    return DealModel()


def policy_jytgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of jyt.gxzf.gov.cn.

    Builds one row for policy_latest (metadata) and one for
    policy_fulltext_latest (HTML fulltext), and stores attachment info back on
    the originating task row via a befor_dicts update.

    :raises Exception: when the fulltext container is missing, so the task
        fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99385"
    product = "JYTGXZF"
    zt_provider = "jytgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page <h1>; fall back to the title captured at list stage.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only a well-formed YYYYMMDD date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # A bare "省..." organ refers to the Guangxi regional level; qualify it.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # A message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception("fulltext not found for {}".format(data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info or {}, ensure_ascii=False)})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_mztgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a list page of mzt.gxzf.gov.cn (Guangxi Civil Affairs Dept.).

    On the first page (page_index == 0) the remaining list pages are scheduled
    via befor_dicts; every article link found on the current page is queued for
    the next (article) stage via next_dicts.

    :param callmodel: framework callback context carrying the downloaded HTML,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to a single page so total_page is always bound (the original
        # code raised NameError when the createPageHTML pager script was absent).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (index_1.shtml ...).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('./a/@href').get("").replace("./", "")
            base_url = r'http://mzt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://mzt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                # Skip links that are not site articles.
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99386'
            article_json["url"] = url
            article_json["title"] = li.xpath('./a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_mztgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No parsing needed at the article download stage; return an empty DealModel."""
    return DealModel()


def policy_mztgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of mzt.gxzf.gov.cn.

    Builds one row for policy_latest (metadata) and one for
    policy_fulltext_latest (HTML fulltext), and stores attachment info back on
    the originating task row via a befor_dicts update.

    :raises Exception: when the fulltext container is missing, so the task
        fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99386"
    product = "MZTGXZF"
    zt_provider = "mztgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page <h1>; fall back to the title captured at list stage.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # Unlike the sibling ETL callbacks, the list-stage date is normalized here.
    pub_date = clean_pubdate(article_json.get("pub_date", ""))
    # Only a well-formed YYYYMMDD date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # A bare "省..." organ refers to the Guangxi regional level; qualify it.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # A message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception("fulltext not found for {}".format(data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info or {}, ensure_ascii=False)})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_cztgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an HTML list page of czt.gxzf.gov.cn (Guangxi Finance Dept.).

    Schedules the remaining list pages when on the first page and queues every
    article link for the next (article) stage.  The 'zfxxgkzl/zcwj/xzfgk'
    column uses different list markup and is handled by a separate xpath.

    :param callmodel: framework callback context carrying the downloaded HTML,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to a single page so total_page is always bound (the original
        # code raised NameError when the createPageHTML pager script was absent).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (index_1.shtml ...).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        if list_rawid != 'zfxxgkzl/zcwj/xzfgk':
            li_list = res.xpath('//ul[contains(@class,"txt-sub-list")]/li')
        else:
            li_list = res.xpath('//div[contains(@class,"zcwj-list")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            if list_rawid != 'zfxxgkzl/zcwj/xzfgk':
                a_info = li.xpath('./a')
            else:
                a_info = li.xpath('./div[contains(@class,"zcwj-info")]/a')
            href = a_info.xpath('./@href').get("").replace("./", "")
            base_url = r'http://czt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://czt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                # Skip links that are not site articles.
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99387'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            if len(pub_date) != 8:
                # Anything that is not YYYYMMDD is treated as unknown.
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_cztgxzflist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a JSON (API) list page of czt.gxzf.gov.cn.

    Pagination and items come from the API payload instead of HTML; pages are
    1-based here, so the first page schedules pages 2..total_page.

    :param callmodel: framework callback context carrying the downloaded JSON,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']
        total_page = data["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: schedule the remaining API pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["list"]:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("doc_pub_url", "")
            if 'shtml' not in url:
                # Skip entries that are not site articles.
                continue
            # API field f_202251240216 == "0" is mapped to legal_status "有效"
            # (presumably "still in force" — TODO confirm field semantics).
            legal_status = "有效" if item.get("f_202251240216", "") == "0" else ""
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99387'
            article_json["legal_status"] = legal_status
            article_json["url"] = url
            article_json["title"] = item.get("f_2022512470353")
            article_json["pub_date"] = item.get("save_time").split(" ")[0].replace('-', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_cztgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No parsing needed at the article download stage; return an empty DealModel."""
    return DealModel()


def policy_cztgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of czt.gxzf.gov.cn.

    Builds one row for policy_latest (metadata) and one for
    policy_fulltext_latest (HTML fulltext), and stores attachment info back on
    the originating task row via a befor_dicts update.

    :raises Exception: when the fulltext container is missing, so the task
        fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99387"
    product = "CZTGXZF"
    zt_provider = "cztgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page <h1>; fall back to the title captured at list stage.
    div_article = sel.xpath('//div[contains(@class,"article")]')
    title = div_article.xpath('./h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        # List stage had no usable YYYYMMDD date; fall back to the date shown
        # on the article page itself.
        tmp_pub = sel.xpath("//div[@class='article-inf-left']/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：", "").strip()
    if organ.startswith('省'):
        # A bare "省..." organ refers to the Guangxi regional level; qualify it.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace("成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # A message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception("fulltext not found for {}".format(data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info or {}, ensure_ascii=False)})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_rstgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a list page of rst.gxzf.gov.cn (Guangxi Human Resources Dept.).

    Schedules the remaining list pages when on the first page and queues every
    article link for the next (article) stage.  Two list layouts are tried:
    'catalog_bmt' first, then the 'zfxxgk-more-list' fallback.

    :param callmodel: framework callback context carrying the downloaded HTML,
        the task row (sql_model) and redis task info.
    :return: DealModel with the scheduled inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to a single page so total_page is always bound (the original
        # code raised NameError when the createPageHTML pager script was absent).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (index_1.shtml ...).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        li_list = res.xpath('//div[contains(@class,"catalog_bmt")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"zfxxgk-more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Articles are dispatched under the next stage's task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://rst.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://rst.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                # Skip links that are not site articles.
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99388'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath('./span/text()').extract_first().replace('-', '').strip()
            if len(pub_date) != 8:
                # Anything that is not YYYYMMDD is treated as unknown.
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_rstgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No parsing needed at the article download stage; return an empty DealModel."""
    return DealModel()


def policy_rstgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of rst.gxzf.gov.cn.

    Parses the downloaded article HTML, builds a metadata row for the
    ``policy_latest`` table and a full-text row for ``policy_fulltext_latest``,
    and writes extracted attachment info back to the source row via
    ``other_dicts``.

    Raises:
        Exception: if the full-text container div cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    # "1_1" is the key of the single downloaded page for this task.
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99388"
    product = "RSTGXZF"
    zt_provider = "rstgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    # lngid: record id derived from sub_db_id + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = sel.xpath('//div[contains(@class,"conts_text")]/h3/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # List-stage pub_date is kept only when already in YYYYMMDD form;
    # otherwise re-extract it from the article page.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        tmp_pub = sel.xpath("//div[@class='conts_ly']/ul/li[1]/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # Province-level organs appear without the region prefix; add it back.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    # Legal status is carried in an embedded "isok = '...'" fragment:
    # '0'/'有效' means in force, '1'/'失效' means repealed.
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).strip()
    tmps = re.findall(r"isok = '(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = '//div[@id="details"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Missing full text is treated as a failed parse of this record.
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info found inside the full-text area is stored back on the
    # source row (other_dicts) for later processing.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_nynctgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nynct.gxzf.gov.cn policy list pages.

    On the first page (page_index == 0) it schedules the remaining list
    pages as new list tasks; on every page it emits one article task per
    qualifying ``<li>`` entry.

    Fix: ``total_page`` is now initialized to 1 before parsing the pager
    script — previously it was only assigned inside ``if page_info:`` and
    raised NameError when the ``createPageHTML`` script was missing.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) pager
        # script; default to a single page when it is absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        li_list = res.xpath('//ul[contains(@class,"ulthree")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://nynct.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://nynct.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid = file name without extension, e.g. ".../t123.shtml" -> "t123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99389'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            # Dates on this site use dots ("2022.01.02"); keep only YYYYMMDD.
            pub_date = li.xpath('./span/text()').extract_first().replace('.', '').strip()
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nynctgxzflist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the nynct.gxzf.gov.cn JSONP list API.

    The downloaded body is a JSONP response; the JSON payload is unwrapped
    with a regex, remaining pages are scheduled on the first page
    (page_index == 1 — this API is 1-based), and each content item becomes
    an article task.

    Fix: the JSONP unwrap previously matched one hard-coded jQuery callback
    name ("jQuery18305377075502810842_1664264535917"); such names are
    request-generated, so the pattern now matches any "jQuery<digits>_<digits>"
    callback (a strict superset of the old match).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Unwrap the JSONP envelope: jQuery<digits>_<timestamp>({...});
        ljson = json.loads(re.findall(r"jQuery\d+_\d+\((.*?)\);", html)[0])
        total_page = ljson["page"]["totalPages"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = ljson["page"]["content"]
        for item in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("DOCPUBURL", "")
            if not url or 'shtml' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99389'
            article_json["url"] = url
            article_json["title"] = item.get("DOCTITLE")
            article_json["pub_date"] = clean_pubdate(item.get("save_time"))
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nynctgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nynct.gxzf.gov.cn; no follow-up tasks are scheduled."""
    return DealModel()


def policy_nynctgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of nynct.gxzf.gov.cn.

    Parses the downloaded article HTML, builds a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes extracted attachment info back to the source row via
    ``other_dicts``.

    Raises:
        Exception: if no publication date can be found, or if the
            full-text container div cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    # "1_1" is the key of the single downloaded page for this task.
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99389"
    product = "NYNCTGXZF"
    zt_provider = "nynctgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    # lngid: record id derived from sub_db_id + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = sel.xpath('//div[contains(@class,"detailnews")]/h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # List-stage pub_date is kept only when already in YYYYMMDD form;
    # otherwise try two on-page locations in turn.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        tmp_pub = sel.xpath("//div[@class='conts_ly']/ul/li[1]/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    if not pub_date:
        pub_date_info = ''.join(sel.xpath('//div[@class="detailbut"]//li[contains(text(),"日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        # A record without any date is treated as a failed parse.
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # Province-level organs appear without the region prefix; add it back.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    # Legal status is carried in an embedded "isok = '...'" fragment:
    # '0'/'有效' means in force, '1'/'失效' means repealed.
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).strip()
    tmps = re.findall(r"isok = '(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = '//div[@id="mcontent"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Missing full text is treated as a failed parse of this record.
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info found inside the full-text area is stored back on the
    # source row (other_dicts) for later processing.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_zjtgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for zjt.gxzf.gov.cn policy list pages.

    On the first page (page_index == 0) it schedules the remaining list
    pages as new list tasks; on every page it emits one article task per
    qualifying ``<li>`` entry.

    Fix: ``total_page`` is now initialized to 1 before parsing the pager
    script — previously it was only assigned inside ``if page_info:`` and
    raised NameError when the ``createPageHTML`` script was missing.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) pager
        # script; default to a single page when it is absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        # Two list layouts exist on this site; try the common one first.
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"zfxxgk-more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://zjt.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://zjt.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99390'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = clean_pubdate(li.xpath('./span/text()').extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_zjtgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjt.gxzf.gov.cn; no follow-up tasks are scheduled."""
    return DealModel()


def policy_zjtgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of zjt.gxzf.gov.cn.

    Parses the downloaded article HTML, builds a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes extracted attachment info back to the source row via
    ``other_dicts``.

    Raises:
        Exception: if neither of the two known full-text containers
            can be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    # "1_1" is the key of the single downloaded page for this task.
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99390"
    product = "ZJTGXZF"
    zt_provider = "zjtgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    # lngid: record id derived from sub_db_id + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = sel.xpath('//div[contains(@class,"article")]/h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # List-stage pub_date is kept only when already in YYYYMMDD form;
    # otherwise re-extract it from the article page.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        tmp_pub = sel.xpath("//div[@class='conts_ly']/ul/li[1]/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Two article layouts exist on this site; try both containers.
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = '//div[@class="scroll-wrap"]'
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            # Missing full text is treated as a failed parse of this record.
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info found inside the full-text area is stored back on the
    # source row (other_dicts) for later processing.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_wsjkwgxzflist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wsjkw.gxzf.gov.cn policy list pages.

    On the first page (page_index == 0) it schedules the remaining list
    pages as new list tasks; on every page it emits one article task per
    qualifying ``<li>`` entry.

    Fix: ``total_page`` is now initialized to 1 before parsing the pager
    script — previously it was only assigned inside ``if page_info:`` and
    raised NameError when the ``createPageHTML`` script was missing.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) pager
        # script; default to a single page when it is absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://wsjkw.gxzf.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://wsjkw.gxzf.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99391'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = clean_pubdate(li.xpath('./span/text()').extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_wsjkwgxzfarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.gxzf.gov.cn; no follow-up tasks are scheduled."""
    return DealModel()


def policy_wsjkwgxzfarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of wsjkw.gxzf.gov.cn.

    Parses the downloaded article HTML, builds a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes extracted attachment info back to the source row via
    ``other_dicts``.

    Raises:
        Exception: if the full-text container div cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    # "1_1" is the key of the single downloaded page for this task.
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99391"
    product = "WSJKWGXZF"
    zt_provider = "wsjkwgxzfgovpolicy"
    rawid = callmodel.sql_model.rawid
    # lngid: record id derived from sub_db_id + rawid.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page title; fall back to the title captured on the list page.
    title = sel.xpath('//div[contains(@class,"article-re")]/h1/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # List-stage pub_date is kept only when already in YYYYMMDD form;
    # otherwise re-extract it from the article page.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        tmp_pub = sel.xpath("//div[@class='conts_ly']/ul/li[1]/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # Province-level organs appear without the region prefix; add it back.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    # Legal status is carried in an embedded "isok = '...'" fragment:
    # '0'/'有效' means in force, '1'/'失效' means repealed.
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).strip()
    tmps = re.findall(r"isok = '(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Missing full text is treated as a failed parse of this record.
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info found inside the full-text area is stored back on the
    # source row (other_dicts) for later processing.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_nanninglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.nanning.gov.cn policy and interpretation pages.

    On the first page (page_index == 0) it schedules the remaining list
    pages as new list tasks; on every page it emits one article task per
    qualifying ``<li>`` entry.

    Fix: ``total_page`` is now initialized to 1 before parsing the pager
    script — previously it was only assigned inside ``if page_info:`` and
    raised NameError when the ``createPageHTML`` script was missing.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) pager
        # script; default to a single page when it is absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The interpretation channel (jdhy/zcjd) pages with a
                # different file-name pattern.
                page_info = "index_{}.html".format(page)
                if 'jdhy/zcjd' in list_rawid:
                    page_info = "wzjd_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Two list layouts exist on this site; try the common one first.
        li_list = res.xpath('//ul[contains(@class,"has_line")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zcjd-list")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.nanning.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                  list_json["page_info"])
            if href.startswith("/"):
                url = 'https://www.nanning.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99392'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./span[@class='time']/text()").extract_first()
            if not checkExist(pub_date):
                pub_date = li.xpath('./span/text()').extract_first()
            pub_date = clean_pubdate(pub_date)
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)

            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nanninglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the www.nanning.gov.cn JSONP list API.

    The downloaded body is a ``CallBack(...)`` JSONP response; its payload
    is unwrapped, remaining pages are scheduled on the first page
    (page_index == 1 — this API is 1-based), and each content item becomes
    an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # SECURITY NOTE(review): eval() executes remote content as Python.
        # It is presumably used because the payload is not strict JSON
        # (json.loads works for the sibling nynct API at
        # policy_nynctgxzflist1_callback) — confirm the payload shape and
        # switch to json.loads/ast.literal_eval if possible.
        ljson = eval(re.findall(r"CallBack\(([\s\S]*?)\);", html)[0])
        total_page = ljson["page"]["totalPages"]
        page_index = int(callmodel.sql_model.page_index)
        list_rawid = callmodel.sql_model.list_rawid
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            # First page: enqueue the remaining list pages, bumping the
            # page= query parameter in the stored page_info URL fragment.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("page=1", "page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = ljson["page"]["content"]
        for item in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("DOCPUBURL", "")
            if not url or 'html' not in url:
                continue
            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99392'
            article_json["url"] = url
            article_json["title"] = item.get("DOCTITLE")
            article_json["pub_date"] = clean_pubdate(item.get("PUBDATE", ""))
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nanningarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Nanning policy pages; no extra scheduling is needed, so an empty DealModel is returned."""
    return DealModel()


def policy_nanningarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Nanning (www.nanning.gov.cn) policy article page.

    Extracts metadata (title, dates, issuing organ, document number, index
    number, legal status) and the full text from the downloaded HTML, and
    queues rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables. Attachment info (if any) is written back onto the source row
    via an update model.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was stored at list stage (url/title/pub_date, etc.).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99392"  # source id used for Nanning gov policy rows
    product = "NANNING"
    zt_provider = "nanninggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    data["legal_status"] = article_json.get("legal_status", "")
    # Prefer the on-page title; fall back to the title saved at list stage.
    title = sel.xpath('//div[contains(@id,"ucap-title")]/h2/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # expected form: YYYYMMDD
        pub_year = pub_date[0:4]
    else:
        # No usable list-stage date: re-read it from the article page.
        tmp_pub = sel.xpath("//div[@class='conts_ly']/ul/li[1]/text()").get("")
        pub_date = clean_pubdate(tmp_pub)
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Metadata labels are "<strong>label</strong>" inside a <td>; join all text
    # under the parent cell and strip the label prefix.
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文机关')]/parent::td[1]//text()").extract()).replace("发文机关：",
                                                                                                               "").strip()
    # Organ names starting with '省' are prefixed with the province name.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # The index-number label can be split across nodes, so match on '索' only
    # and strip everything up to the full-width colon.
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    legal_status = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace(
        "效力状态：", "").strip()
    data["legal_status"] = cleanSemicolon(legal_status)

    # Full text is required; abort ETL when the container is absent.
    # NOTE(review): bare `raise Exception` loses context -- a message would help.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back to the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_liuzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.liuzhou.gov.cn policy documents.

    Parses the downloaded HTML list page, schedules the remaining list pages
    when this is the first page (page_index == 0), and emits one article-level
    task per list item whose URL contains ``html``.

    Fixes over the previous revision:
    - ``total_page`` now defaults to 1, so it is always bound even when the
      ``createPageHTML`` pagination script is missing (previously a NameError).
    - A list item whose <a> has no direct text node yields an empty title
      instead of crashing with AttributeError on ``None.strip()``.
    - Removed a leftover debug ``print``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) script.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1..total_page-1).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The 'fdzdgknr' column uses a different pagination file name.
                if 'fdzdgknr' in list_rawid:
                    page_name = "lists_{}.shtml".format(page)
                else:
                    page_name = "index_{}.shtml".format(page)
                sql_dict["list_json"] = json.dumps({"page_info": page_name}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"list-group")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"systemList")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            if not checkExist(a_info):
                a_info = li.xpath('./div//a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.liuzhou.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.liuzhou.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name of the article page without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99393'
            article_json["url"] = url
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = li.xpath("./span[@class='time']/text()").extract_first()
            if not checkExist(pub_date):
                # NOTE(review): "//div[...]" on a relative selector searches the
                # whole document, not this <li>; ".//div" was probably intended.
                # Kept as-is to preserve behavior -- confirm before changing.
                pub_date = li.xpath("//div[@class='layout-fixed']/text()").extract_first()
            pub_date = clean_pubdate(pub_date)
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_liuzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSONP list-API callback for www.liuzhou.gov.cn policy documents.

    The response body wraps a payload in ``CallBack(...)``; the payload's
    ``page.totalPages`` drives pagination and ``page.content`` holds article
    entries. On the first page (page_index == 1) the remaining list pages are
    scheduled; every entry with an ``html`` DOCPUBURL becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # NOTE(review): eval() on content fetched from the site is unsafe; if
        # the CallBack(...) payload is plain JSON, json.loads would be the
        # safer parser -- confirm the payload format before changing.
        ljson = eval(re.findall(r"CallBack\(([\s\S]*?)\);", html)[0])
        total_page = ljson["page"]["totalPages"]
        page_index = int(callmodel.sql_model.page_index)
        list_rawid = callmodel.sql_model.list_rawid
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            # First page: clone this row for each remaining page, rewriting the
            # "page=1" fragment stored in list_json["page_info"].
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            # NOTE(review): range stops at total_page - 1, unlike
            # policy_wuzhoulist1_callback which uses total_page + 1 -- confirm
            # whether this site's page parameter is 0-based.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("page=1", "page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = ljson["page"]["content"]
        for item in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("DOCPUBURL", "")
            if not url or 'html' not in url:
                continue
            # rawid = file name of the article page without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99393'
            article_json["url"] = url
            article_json["title"] = item.get("DOCTITLE")
            article_json["pub_date"] = clean_pubdate(item.get("PUBDATE", ""))
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_liuzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Liuzhou policy pages; no extra scheduling is needed, so an empty DealModel is returned."""
    return DealModel()


def policy_liuzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Liuzhou (www.liuzhou.gov.cn) policy article page.

    Extracts metadata (title, dates, issuing organ, document number, index
    number, legal status) and the full text from the downloaded HTML, queues
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back onto the source row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was stored at list stage (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99393"  # source id used for Liuzhou gov policy rows
    product = "LIUZHOU"
    zt_provider = "liuzhougovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page title; fall back to the title saved at list stage.
    title = sel.xpath('//div[contains(@class,"text-center")]/h3/text()')
    if checkExist(title):
        title = title.get("")
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # expected form: YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Metadata sits in <dl>/<dt> pairs under div#PC; join the pair's text and
    # strip the label prefix.
    organ = ''.join(
        sel.xpath("//div[@id='PC']//dt[contains(string(), '发文单位')]/parent::dl[1]//text()").extract()).replace("发文单位：",
                                                                                                              "").strip()
    # Organ names starting with '省' are prefixed with the province name.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(
        sel.xpath("//div[@id='PC']//dt[contains(string(), '成文日期')]/parent::dl[1]//text()").extract()).replace("成文日期：",
                                                                                                              "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(
        sel.xpath("//div[@id='PC']//dt[contains(string(), '发文字号')]/parent::dl[1]//text()").extract()).replace("发文字号：",
                                                                                                              "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # The index-number label can be split across nodes, so match on '索' only
    # and strip everything up to the full-width colon.
    index_no = ''.join(
        sel.xpath("//div[@id='PC']//dt[contains(string(), '索')]/parent::dl[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    # Legal status is taken from an isok='...' fragment in the cell text:
    # '0'/'有效' => 有效 (in force), '1'/'失效' => 失效 (expired).
    ls_str = ''.join(
        sel.xpath("//div[@id='PC']//dt[contains(string(), '效力状态')]/parent::dl[1]//text()").extract()).strip()
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text is required; abort when the container is missing.
    # NOTE(review): bare `raise Exception` loses context -- a message would help.
    fulltext_xpath = "//div[@class='contentTextBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back to the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_guilinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.guilin.gov.cn policy documents.

    Parses the downloaded HTML list page, schedules the remaining list pages
    when this is the first page (page_index == 0), and emits one article-level
    task per list item whose URL contains ``html``.

    Fixes over the previous revision:
    - ``total_page`` now defaults to 1, so it is always bound even when the
      ``createPageHTML`` pagination script is missing (previously a NameError).
    - A list item whose <a> has no direct text node yields an empty title
      instead of crashing with AttributeError on ``None.strip()``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) script.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list pages index_1..index_{total_page-1}.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"base-tab-info-list")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"info-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.guilin.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'https://www.guilin.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name of the article page without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99394'
            article_json["url"] = url
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            # The publish date may be a sibling <span> or nested in the <a>.
            pub_date = li.xpath("./span/text()").extract_first()
            if not checkExist(pub_date):
                pub_date = a_info.xpath("./span/text()").extract_first()
            pub_date = clean_pubdate(pub_date)
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_guilinlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSONP list-API callback for www.guilin.gov.cn policy documents.

    The response body wraps a payload in ``CallBack(...)``; the payload's
    ``page.totalPages`` drives pagination and ``page.content`` holds article
    entries. On the first page (page_index == 1) the remaining list pages are
    scheduled; every entry with an ``html`` DOCPUBURL becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # NOTE(review): eval() on content fetched from the site is unsafe; if
        # the CallBack(...) payload is plain JSON, json.loads would be the
        # safer parser -- confirm the payload format before changing.
        ljson = eval(re.findall(r"CallBack\(([\s\S]*?)\);", html)[0])
        total_page = ljson["page"]["totalPages"]
        page_index = int(callmodel.sql_model.page_index)
        list_rawid = callmodel.sql_model.list_rawid
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            # First page: clone this row for each remaining page, rewriting the
            # "page=1" fragment stored in list_json["page_info"].
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            # NOTE(review): range stops at total_page - 1, unlike
            # policy_wuzhoulist1_callback which uses total_page + 1 -- confirm
            # whether this site's page parameter is 0-based.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("page=1", "page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = ljson["page"]["content"]
        for item in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = item.get("DOCPUBURL", "")
            if not url or 'html' not in url:
                continue
            # rawid = file name of the article page without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99394'
            article_json["url"] = url
            article_json["title"] = item.get("DOCTITLE")
            article_json["pub_date"] = clean_pubdate(item.get("PUBDATE", ""))
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_guilinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Guilin policy pages; no extra scheduling is needed, so an empty DealModel is returned."""
    return DealModel()


def policy_guilinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Guilin (www.guilin.gov.cn) policy article page.

    Extracts metadata (title, dates, issuing organ, document number, index
    number, legal status) and the full text from the downloaded HTML, queues
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back onto the source row.

    Raises:
        Exception: when neither full-text container is present on the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was stored at list stage (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99394"  # source id used for Guilin gov policy rows
    product = "GUILIN"
    zt_provider = "guilingovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: try the page header, then the detail-title div, then the
    # list-stage title as a last resort.
    title = sel.xpath("//div[@class='header']/h1/p/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = sel.xpath('//div[contains(@class,"detail-title")]/text()')
        if checkExist(title):
            title = title.get("")
        else:
            title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # expected form: YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Metadata lines use "[label]value" inside <li> elements; strip the label.
    organ = ''.join(sel.xpath("//li[contains(string(), '发文机构')]//text()").extract()).replace("[发文机构]", "").strip()
    # Organ names starting with '省' are prefixed with the province name.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//li[contains(string(), '成文日期')]//text()").extract()).replace("[成文日期]",
                                                                                                    "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//li[contains(string(), '发文字号')]//text()").extract()).replace("[发文字号]", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # The index-number label can be split across nodes, so match on '索' only
    # and keep everything after the closing bracket.
    index_no = ''.join(sel.xpath("//li[contains(string(), '索')]//text()").extract()).strip()
    if "]" in index_no:
        tmps = index_no.split("]")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    legal_status = ""
    # Legal-status line is matched loosely on '有' -- presumably the label is
    # 有效性 or similar; verify against a live page.
    ls_str = ''.join(sel.xpath("//li[contains(string(), '有')]//text()").extract()).strip()
    if "]" in ls_str:
        tmps = ls_str.split("]")
        legal_status = tmps[1].strip()
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text lives in one of two containers; abort if neither exists.
    # NOTE(review): bare `raise Exception` loses context -- a message would help.
    fulltext_xpath = "//div[@class='mainTextBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back to the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_wuzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.wuzhou.gov.cn policy documents.

    Parses the downloaded HTML list page, schedules the remaining list pages
    when this is the first page (page_index == 0), and emits one article-level
    task per list item whose URL contains ``html``.

    Fixes over the previous revision:
    - ``total_page`` now defaults to 1, so it is always bound even when the
      ``createPageHTML`` pagination script is missing (previously a NameError).
    - A list item whose <a> has no direct text node yields an empty title
      instead of crashing with AttributeError on ``None.strip()``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the site's createPageHTML(...) script.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue list pages index_1..index_{total_page-1}.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"info-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.wuzhou.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.wuzhou.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name of the article page without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99395'
            article_json["url"] = url
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = clean_pubdate(li.xpath("./span/text()").extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_wuzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSON list-API callback for www.wuzhou.gov.cn policy documents.

    Reads the JSON payload at para_dicts["data"]["1_1"]["data"]: on the first
    page (page_index == 1) every remaining list page is scheduled, then each
    entry of the payload's "list" with an ``shtml`` URL becomes an
    article-level task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        payload = para_dicts["data"]["1_1"]['data']
        total_page = payload["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: clone this row once per remaining page (2..total_page).
            pending = DealInsertModel()
            pending.insert_pre = CoreSqlValue.insert_ig_it
            row = deal_sql_dict(callmodel.sql_model.dict())
            for page_no in range(page_index + 1, total_page + 1):
                row["page"] = total_page
                row["page_index"] = page_no
                row["list_json"] = json.dumps({}, ensure_ascii=False)
                pending.lists.append(row.copy())
            result.befor_dicts.insert.append(pending)
        articles = DealInsertModel()
        articles.insert_pre = CoreSqlValue.insert_ig_it
        for entry in payload["list"]:
            task = info_dicts.copy()
            task["task_tag"] = task.pop("task_tag_next")
            doc_url = entry.get("doc_pub_url", "")
            if 'shtml' not in doc_url:
                continue
            # rawid = file name of the article page without its extension.
            task["rawid"] = re.findall('(.*?)\.', doc_url.split('/')[-1])[0]
            task["sub_db_id"] = '99395'
            meta = {
                "url": doc_url,
                "title": entry.get("f_202183157605"),
                "pub_date": clean_pubdate(entry.get("save_time")),
            }
            task["article_json"] = json.dumps(meta, ensure_ascii=False)
            articles.lists.append(task)
        result.next_dicts.insert.append(articles)
    return result


def policy_wuzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Wuzhou policy pages; no extra scheduling is needed, so an empty DealModel is returned."""
    return DealModel()


def policy_wuzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Wuzhou (www.wuzhou.gov.cn) policy article page.

    Extracts metadata (title, dates, issuing organ, document number, index
    number, legal status) and the full text from the downloaded HTML, queues
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back onto the source row.

    Raises:
        Exception: when neither full-text container is present on the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was stored at list stage (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99395"  # source id used for Wuzhou gov policy rows
    product = "WUZHOU"
    zt_provider = "wuzhougovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page title; fall back to the title saved at list stage.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # expected form: YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Metadata labels are "<strong>label</strong>" inside a <td>; join all text
    # under the parent cell and strip the label prefix.
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # Organ names starting with '省' are prefixed with the province name.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '印发日期')]/parent::td[1]//text()").extract()).replace(
        "印发日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # The index-number label can be split across nodes, so match on '索' only
    # and strip everything up to the full-width colon.
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    # Legal status is taken from an isok='...' fragment in the cell text:
    # '0'/'有效' => 有效 (in force), '1'/'失效' => 失效 (expired).
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace("效力状态：",
                                                                                                                "").strip()
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text lives in one of two containers; abort if neither exists.
    # NOTE(review): bare `raise Exception` loses context -- a message would help.
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back to the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_beihailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.beihai.gov.cn policy channels.

    Parses the list HTML found in ``para_dicts["data"]["1_1"]``:

    * on the first page (``page_index == 0``) reads the total page count from
      the ``createPageHTML(N, ...)`` pager script and queues the remaining
      list pages as "before" insert rows;
    * on every page extracts each article's url/title/pub_date and queues it
      for the next task tag.

    :param callmodel: platform callback context (sql row, redis config, page data)
    :return: DealModel carrying the rows to insert
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Total page count from the pager script; default to a single page.
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages 1..total_page-1: page 0 is the index page already fetched.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//ul[@class="more-list"]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            base_url = r'http://www.beihai.gov.cn/{}/index.shtml'.format(callmodel.sql_model.list_rawid)
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99396'
            article_json = {"url": url,
                            "title": li.xpath('a/text()').extract_first().strip()}
            pub_date = clean_pubdate(li.xpath('span/text()').extract_first().strip())
            # Keep only fully normalized YYYYMMDD dates.
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_beihaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beihai: no parse work here, the ETL step does it all."""
    return DealModel()


def policy_beihaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Beihai policy article pages (sub_db_id 99396).

    Extracts metadata (title, pub_no, written_date, organ) and the full text
    from the downloaded HTML, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and stores attachment info (``other_dicts``)
    back on the task row.

    :param callmodel: platform callback context with the downloaded html and
        the article_json captured at list stage
    :return: EtlDealModel with save_data rows and the other_dicts update
    :raises Exception: when the full-text container div is missing
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title priority: page <h1> -> ArticleTitle meta tag -> list-stage title.
    title = ''.join(res.xpath('//div[@class="article"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells carry a "label：value" text; keep the part after the colon.
    pub_no = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文字号")]/parent::td[1]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"成文日期")]/parent::td[1]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="people-desc"]//strong[contains(text(),"发文单位")]/parent::td[1]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # Pages abbreviate the issuing organ as "市..."; qualify with the city name.
        organ = '北海' + organ

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99396'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BEIHAI"
    zt_provider = "beihaigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments referenced inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_chongzuolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.chongzuo.gov.cn policy channels.

    On the first page the total page count is derived from the
    ``createPageHTML(...)`` pager script and the remaining list pages are
    queued; on every page each article (url/title/pub_date) is queued for
    the next task tag.

    :param callmodel: platform callback context
    :return: DealModel carrying the rows to insert
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to one page: the previous code left total_page unbound
        # (NameError on the first page) when the pager script was absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in res.xpath('//ul[contains(@class,"more-list")]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.chongzuo.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                  list_json["page_info"])
            # Root-relative links bypass urljoin; others resolve against the list page.
            if href.startswith("/"):
                url = 'http://www.chongzuo.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99397'
            article_json = {"url": url,
                            "title": a_info.xpath('./text()').extract_first().strip()}
            pub_date = clean_pubdate(li.xpath("./span/text()").extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_chongzuolist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSON list callback for the Chongzuo policy search API.

    Page data arrives as parsed JSON (``['data']``), not HTML: the pager
    reports the total page count and ``"list"`` holds the article records.
    The first page (``page_index == 1``) queues the remaining pages; every
    page queues its articles for the next task tag.

    :param callmodel: platform callback context
    :return: DealModel carrying the rows to insert
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']
        total_page = data["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # This API is 1-based, so pages run 2..total_page inclusive.
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["list"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = item.get("doc_pub_url", "")
            if 'shtml' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99397'
            article_json = {"url": url,
                            "title": item.get("f_2022512470353"),
                            "pub_date": clean_pubdate(item.get("save_time"))}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_chongzuoarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chongzuo: parsing is deferred to the ETL step."""
    return DealModel()


def policy_chongzuoarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongzuo policy article pages (sub_db_id 99397).

    Builds a ``policy_latest`` metadata row and a ``policy_fulltext_latest``
    row from the downloaded article HTML, then records attachment info
    (``other_dicts``) back on the task row.

    :param callmodel: platform callback context holding the downloaded html
        and the article_json captured at list stage
    :return: EtlDealModel with save_data rows and the other_dicts update
    :raises Exception: when neither known full-text container is present
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99397"
    product = "CHONGZUO"
    zt_provider = "chongzuogovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the page <h1>, fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # pub_year only when pub_date is a full YYYYMMDD string.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # NOTE(review): prefixing '广西' when organ starts with '省' looks copied
    # from a province-level template — confirm it matches Chongzuo pages.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # Matches on '索' alone — presumably because the label's spacing varies; verify.
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace("效力状态：",
                                                                                                                "").strip()
    # Legal status codes found in the cell text: '0'/'有效' -> 有效, '1'/'失效' -> 失效.
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text: try the article container first, then the older page layout.
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments referenced inside the full-text container.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_laibinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.laibin.gov.cn policy channels.

    The first page derives the item count from the pager script
    (``"shtml", "<count>"``), converts it to a page count (20 items/page for
    the zcwj/gfxwjdl channel, 30 elsewhere, capped at 34 pages) and queues
    the remaining list pages.  Every page queues its articles for the next
    task tag; the gfxwjdl channel carries no list-level pub_date.

    :param callmodel: platform callback context
    :return: DealModel carrying the rows to insert
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = re.findall(r'"shtml", *"(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            # Page size differs per channel; the site only serves 34 pages.
            per_page = 20 if 'zcwj/gfxwjdl' in list_rawid else 30
            total_page = min(math.ceil(max_count / per_page), 34)
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Two layouts: plain <ul> lists and the gazette's <tbody> table.
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li|//tbody[@class="dataAll"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('.//div[@class="gzk-table-title"]/a/@href|a/@href').extract_first()
            base_url = r'http://www.laibin.gov.cn/{}/index.shtml'.format(callmodel.sql_model.list_rawid)
            url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99398'
            article_json = {
                "url": url,
                "title": li.xpath('.//div[@class="gzk-table-title"]/a/text()|a/text()').extract_first().strip(),
            }
            if 'zcwj/gfxwjdl' in list_rawid:
                pub_date = ""
            else:
                pub_date = clean_pubdate(li.xpath('span/text()').extract_first().strip())
                if len(pub_date) != 8:
                    pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_laibinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Laibin: parsing is deferred to the ETL step."""
    return DealModel()


def policy_laibinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Laibin policy article pages (sub_db_id 99398).

    Builds a ``policy_latest`` metadata row and a ``policy_fulltext_latest``
    row from the downloaded article HTML, then records attachment info
    (``other_dicts``) back on the task row.

    :param callmodel: platform callback context holding the downloaded html
        and the article_json captured at list stage
    :return: EtlDealModel with save_data rows and the other_dicts update
    :raises Exception: when no pub_date can be determined, or when neither
        known full-text container is present
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99398"
    product = "LAIBIN"
    zt_provider = "laibingovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # Debug trace left by the author.
    print(lngid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the page <h1>, fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4]
    if not pub_date:
        # Some channels record no list-stage date; read it from the article page.
        pub_date_info = ''.join(sel.xpath('//div[@class="article-inf-left"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[0:4]
    if not pub_date:
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # NOTE(review): prefixing '广西' when organ starts with '省' looks copied
    # from a province-level template — confirm it matches Laibin pages.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # Matches on '索' alone — presumably because the label's spacing varies; verify.
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace("效力状态：",
                                                                                                                "").strip()
    # Legal status codes found in the cell text: '0'/'有效' -> 有效, '1'/'失效' -> 失效.
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text: try the article container first, then the older page layout.
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments come from the full-text container plus the dedicated download box.
    file_info1 = get_file_info(data, sel, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, sel, f'(//div[@id="downloadfile"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_gxhzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.gxhz.gov.cn (Hezhou) policy channels.

    Reads the total page count from the ``createPageHTML(...)`` pager script
    on the first page and queues the remaining list pages; every page queues
    its articles (url/title/pub_date) for the next task tag.

    :param callmodel: platform callback context
    :return: DealModel carrying the rows to insert
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Default to one page: the previous code left total_page unbound
        # (NameError on the first page) when the pager script was absent.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in res.xpath('//ul[contains(@class,"more-list")]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.gxhz.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            # Root-relative links bypass urljoin; others resolve against the list page.
            if href.startswith("/"):
                url = 'http://www.gxhz.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99399'
            article_json = {"url": url,
                            "title": a_info.xpath('./text()').extract_first().strip()}
            pub_date = clean_pubdate(li.xpath("./span/text()").extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_gxhzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hezhou: parsing is deferred to the ETL step."""
    return DealModel()


def policy_gxhzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hezhou (gxhz) policy article pages (sub_db_id 99399).

    Builds a ``policy_latest`` metadata row and a ``policy_fulltext_latest``
    row from the downloaded article HTML, then records attachment info
    (``other_dicts``) back on the task row.

    :param callmodel: platform callback context holding the downloaded html
        and the article_json captured at list stage
    :return: EtlDealModel with save_data rows and the other_dicts update
    :raises Exception: when neither known full-text container is present
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99399"
    product = "GXHZ"
    zt_provider = "gxhzgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the page <h1>, fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # pub_year only when pub_date is a full YYYYMMDD string.
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    # NOTE(review): prefixing '广西' when organ starts with '省' looks copied
    # from a province-level template — confirm it matches Hezhou pages.
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    # Matches on '索' alone — presumably because the label's spacing varies; verify.
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace("效力状态：",
                                                                                                                "").strip()
    # Legal status codes found in the cell text: '0'/'有效' -> 有效, '1'/'失效' -> 失效.
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # Full text: try the article container first, then the older page layout.
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments referenced inside the full-text container.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_yulinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.yulin.gov.cn policy documents.

    On the first page (page_index == 0) it derives the total page count from
    the site's inline ``createPageHTML`` script and queues the remaining list
    pages; it then extracts every article link on the current page and queues
    it for the article stage.

    :param callmodel: platform callback context (downloaded html, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count comes from createPageHTML(<total>, ...).  Default to a
        # single page when the script is absent; the original code left
        # total_page unbound in that case (NameError on the first page).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out pages 1..total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Two list layouts exist on the site; fall back to the zfxxgk one.
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"zfxxgk-more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.yulin.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.yulin.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'shtml' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99400'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = clean_pubdate(li.xpath("./span/text()").extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_yulinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for yulin: no extra work, return an empty DealModel."""
    return DealModel()


def policy_yulinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for yulin policy article pages.

    Extracts metadata (title, dates, organ, document numbers, legal status)
    and the full text from the downloaded article HTML, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus an ``other_dicts``
    update carrying attachment info.

    :param callmodel: platform callback context (downloaded html + sql row)
    :return: EtlDealModel with save_data rows and the before-update instruction
    :raises Exception: when no full-text container can be located in the page
    """
    result = EtlDealModel()
    save_data = list()
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = callmodel.down_model.down_dict["1_1"]
    sel = Selector(text=src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99400"
    product = "YULIN"
    zt_provider = "yulingovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page headline; fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace(
        "发文单位：", "").strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace(
        "发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        index_no = index_no.split("：")[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace(
        "效力状态：", "").strip()
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        # isok='0' marks an effective document, isok='1' a repealed one;
        # some pages carry the literal text instead of the flag.
        if tmps[0] in ('0', '有效'):
            legal_status = "有效"
        elif tmps[0] in ('1', '失效'):
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            # The original raised a bare Exception with no diagnostics.
            raise Exception("fulltext container not found for rawid %s" % rawid)
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_baiselist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.baise.gov.cn policy documents.

    On the first page (page_index == 0) it derives the total page count from
    the site's inline ``createPageHTML`` script and queues the remaining list
    pages; it then extracts every article link on the current page and queues
    it for the article stage.

    :param callmodel: platform callback context (downloaded html, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count comes from createPageHTML(<total>, ...).  Default to a
        # single page when the script is absent; the original code left
        # total_page unbound in that case (NameError on the first page).
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                total_page = int(tmps[0].split(",")[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out pages 1..total_page-1.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in res.xpath('//ul[contains(@class,"more-list")]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.baise.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.baise.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99401'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = clean_pubdate(li.xpath("./span/text()").extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_baiselist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the baise JSON list API (pager-style endpoint).

    On the first page (page_index == 1) schedules every remaining page from
    the pager's pageCount, then queues each list entry whose publication URL
    looks like an article page.

    :param callmodel: platform callback context (downloaded json, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']
        total_page = data["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["list"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item.get("doc_pub_url", "")
            if 'shtml' not in url:
                continue
            # rawid is the file name without its extension (raw string avoids
            # the invalid "\." escape warning of the original).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99401'
            article_json = {
                "url": url,
                # f_202183157605 is the API's title field for this site.
                "title": item.get("f_202183157605"),
                "pub_date": clean_pubdate(item.get("save_time")),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_baisearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for baise: no extra work, return an empty DealModel."""
    return DealModel()


def policy_baisearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for baise policy article pages.

    Extracts metadata (title, dates, organ, document numbers, legal status)
    and the full text from the downloaded article HTML, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus an ``other_dicts``
    update carrying attachment info.

    :param callmodel: platform callback context (downloaded html + sql row)
    :return: EtlDealModel with save_data rows and the before-update instruction
    :raises Exception: when no full-text container can be located in the page
    """
    result = EtlDealModel()
    save_data = list()
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = callmodel.down_model.down_dict["1_1"]
    sel = Selector(text=src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99401"
    product = "BAISE"
    zt_provider = "baisegovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page headline; fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace(
        "发文单位：", "").strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace(
        "发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        index_no = index_no.split("：")[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace(
        "效力状态：", "").strip()
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        # isok='0' marks an effective document, isok='1' a repealed one;
        # some pages carry the literal text instead of the flag.
        if tmps[0] in ('0', '有效'):
            legal_status = "有效"
        elif tmps[0] in ('1', '失效'):
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            # The original raised a bare Exception with no diagnostics.
            raise Exception("fulltext container not found for rawid %s" % rawid)
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hechilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.hechi.gov.cn policy documents.

    On the first page (page_index == 0) it derives the page count from the
    inline ``createPageHTML`` script and queues the remaining list pages; it
    then extracts every article link (two list layouts are supported) and
    queues it for the article stage.

    :param callmodel: platform callback context (downloaded html, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out pages 1..total_page-1.
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.shtml".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Two list layouts exist: a plain <ul> list and a table-based register.
        li_list = res.xpath('//ul[contains(@class,"more-list")]/li|//tbody[@class="dataAll"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('.//div[@class="gzk-table-title"]/a/@href|a/@href').extract_first()
            base_url = r'http://www.hechi.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99402'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//div[@class="gzk-table-title"]/a/text()|a/text()').extract_first()
            # Table rows carry "成文日期：<date>"; plain rows just the date.
            # Guard against a missing date node (the original crashed with
            # AttributeError on None here).
            pub_date_info = li.xpath('.//div[@class="gzk-table-time"]/text()|span/text()').extract_first() or ""
            pub_date = clean_pubdate(pub_date_info.split('成文日期：')[-1])
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hechilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the hechi JSON list API (pager-style endpoint).

    On the first page (page_index == 1) schedules every remaining page from
    the pager's pageCount, then queues each list entry whose publication URL
    looks like an article page.

    :param callmodel: platform callback context (downloaded json, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]['data']
        total_page = data["pager"]["pageCount"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["list"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item.get("doc_pub_url", "")
            if 'shtml' not in url:
                continue
            # rawid is the file name without its extension (raw string avoids
            # the invalid "\." escape warning of the original).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99402'
            article_json = {
                "url": url,
                # f_202183157605 is the API's title field for this site.
                "title": item.get("f_202183157605"),
                "pub_date": clean_pubdate(item.get("save_time")),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hechiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for hechi: no extra work, return an empty DealModel."""
    return DealModel()


def policy_hechiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for hechi policy article pages.

    Extracts metadata (title, dates, organ, document numbers, legal status)
    and the full text from the downloaded article HTML, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus an ``other_dicts``
    update carrying attachment info.

    :param callmodel: platform callback context (downloaded html + sql row)
    :return: EtlDealModel with save_data rows and the before-update instruction
    :raises Exception: when no full-text container can be located in the page
    """
    result = EtlDealModel()
    save_data = list()
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = callmodel.down_model.down_dict["1_1"]
    sel = Selector(text=src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99402"
    product = "HECHI"
    zt_provider = "hechigovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page headline; fall back to the list-stage title.
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace(
        "发文单位：", "").strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace(
        "发文字号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        index_no = index_no.split("：")[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace(
        "效力状态：", "").strip()
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        # isok='0' marks an effective document, isok='1' a repealed one;
        # some pages carry the literal text instead of the flag.
        if tmps[0] in ('0', '有效'):
            legal_status = "有效"
        elif tmps[0] in ('1', '失效'):
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            # The original raised a bare Exception with no diagnostics.
            raise Exception("fulltext container not found for rawid %s" % rawid)
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_qinzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.qinzhou.gov.cn policy documents.

    On the first page (page_index == 0) derives the page count from the
    inline ``createPageHTML`` script and schedules the remaining pages, then
    queues every article link found on the current list page.

    :param callmodel: platform callback context (downloaded html, sql row, redis task info)
    :return: DealModel carrying the follow-up insert instructions
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out pages 1..total_page-1.
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}.shtml"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//ul[@class="more-list"]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').get("")
            if not href:
                # A row without a link would otherwise resolve to the list
                # page itself and yield a bogus "index" rawid.
                continue
            base_url = r'http://www.qinzhou.gov.cn/{}/index.shtml'.format(callmodel.sql_model.list_rawid)
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99403'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').get("").strip()
            pub_date = clean_pubdate(li.xpath('span/text()').get("").strip())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_qinzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for qinzhou: no extra work, return an empty DealModel."""
    return DealModel()


def policy_qinzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for qinzhou policy article pages.

    Extracts metadata (title, dates, organ, document numbers, legal status)
    and the full text from the downloaded article HTML, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus an ``other_dicts``
    update carrying attachment info.

    :param callmodel: platform callback context (downloaded html + sql row)
    :return: EtlDealModel with save_data rows and the before-update instruction
    :raises Exception: when the full-text container cannot be located
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline, then the ArticleTitle meta tag, then the
    # title captured at list stage.
    title = ''.join(res.xpath('//div[@class="article"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"发文字号")]/parent::td[1]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"成文日期")]/parent::td[1]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    index_no = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"索")]/parent::td[1]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    legal_status = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"效力状态")]/parent::td[1]/text()').extract()).strip()
    # Bug fix: the original re-stripped index_no here instead of removing
    # the "效力状态：" label from legal_status.
    legal_status = legal_status.split('：')[-1].strip()
    organ = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"发文单位")]/parent::td[1]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # Qualify bare municipal organ names with the city.
        organ = '钦州' + organ

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # The original raised a bare Exception with no diagnostics.
        raise Exception("fulltext container not found for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99403'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QINZHOU"
    zt_provider = "qinzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_fcgslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for fcgs.gov.cn (防城港) policy lists.

    On the first page (page_index == 0) the total page count is read from the
    createPageHTML(...) pager script and the remaining list pages are
    scheduled; on every page the article links are extracted and one
    article-download task is queued per link.

    Returns:
        DealModel carrying the follow-up list-page rows (befor_dicts) and the
        queued article rows (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # total page count is embedded in a createPageHTML(N, ...) call
            max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}.shtml"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="more-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # BUGFIX: this used to read "http://http://www.fcgs.gov.cn/..."
            # (doubled scheme), so urljoin produced broken article urls.
            base_url = 'http://www.fcgs.gov.cn/{}/index.shtml'.format(callmodel.sql_model.list_rawid)
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99404'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            pub_date = li.xpath('span/text()').extract_first().strip()
            pub_date = clean_pubdate(pub_date)
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_fcgsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for fcgs.gov.cn; nothing to post-process at
    this stage, so hand back an empty DealModel."""
    return DealModel()


def policy_fcgsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for fcgs.gov.cn policy detail pages.

    Parses the downloaded html into one policy_latest row plus one
    policy_fulltext_latest row, and writes attachment info (or "{}") back
    onto the source row's other_dicts column.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # prefer the on-page headline, then the ArticleTitle meta tag,
    # then the title captured on the list page
    title = ''.join(res.xpath('//div[@class="article"]/h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # metadata fields carry a "标签：value" label; keep the part after '：'
    pub_no = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"发文字号")]/parent::td[1]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"成文日期")]/parent::td[1]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    index_no = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"索")]/parent::td[1]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    # legal_status uses td/text() (direct text only), so no label to strip;
    # a redundant second index_no split that sat here has been removed.
    legal_status = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"效力状态")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[@class="people-desc"]//strong[contains(text(),"发文单位")]/parent::td[1]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        organ = '防城港' + organ

    fulltext_xpath = '//div[@class="article-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext not found: {}".format(fulltext_xpath))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99404'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FCGS"
    zt_provider = "fcgsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info (or an empty json object) back onto the task row
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_gxgglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gxgg.gov.cn html list pages.

    Reads the total page count from the createPageHTML(...) pager script,
    schedules the remaining list pages when handling the first page, and
    queues one article-download task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: total_page was only assigned inside "if page_info:", so a
        # page without the pager script raised NameError below; default to 1.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'createPageHTML')]/text()").extract_first()
        if page_info:
            tmps = re.findall(r'createPageHTML\((.*?)\);', page_info)
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[0])
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": "index_{}.shtml".format(page)},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"more-list")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.gxgg.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                # site-absolute link: prefix the host directly
                url = 'http://www.gxgg.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99405'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./span/text()").extract_first()
            pub_date = clean_pubdate(pub_date)
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_gxgglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the gxgg.gov.cn JSON list interface.

    On page 1 the remaining pages are scheduled from pager.pageCount; on every
    page one article-download task is queued per entry of data["list"].
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" not in para_dicts["data"]:
        return result

    payload = para_dicts["data"]["1_1"]['data']
    total_page = payload["pager"]["pageCount"]
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # first page: enqueue the remaining list pages
        pending = DealInsertModel()
        pending.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        for page in range(page_index + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page
            row["list_json"] = json.dumps({}, ensure_ascii=False)
            pending.lists.append(row.copy())
        result.befor_dicts.insert.append(pending)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    for item in payload["list"]:
        url = item.get("doc_pub_url", "")
        if 'shtml' not in url:
            continue
        task_row = info_dicts.copy()
        # queue the row under the next stage's task tag
        task_row["task_tag"] = task_row.pop("task_tag_next")
        task_row["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        task_row["sub_db_id"] = '99405'
        detail = {"url": url,
                  "title": item.get("f_202183157605"),
                  "pub_date": clean_pubdate(item.get("save_time"))}
        task_row["article_json"] = json.dumps(detail, ensure_ascii=False)
        articles.lists.append(task_row)
    result.next_dicts.insert.append(articles)
    return result


def policy_gxggarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for gxgg.gov.cn; nothing to post-process at
    this stage, so hand back an empty DealModel."""
    return DealModel()


def policy_gxggarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxgg.gov.cn policy detail pages.

    Builds one policy_latest row and one policy_fulltext_latest row from the
    downloaded html, then records attachment info (or "{}") back onto the
    source row's other_dicts column.

    Raises:
        Exception: when neither fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99405"
    product = "GXGG"
    zt_provider = "gxgggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # prefer the on-page headline; fall back to the title captured on the list page
    title = sel.xpath("//div[@class='article']/h1/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is a YYYYMMDD string when valid
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # metadata cells carry a "标签：value" prefix that is stripped off below
    organ = ''.join(sel.xpath("//strong[contains(string(), '发文单位')]/parent::td[1]//text()").extract()).replace("发文单位：",
                                                                                                               "").strip()
    if organ.startswith('省'):
        organ = '广西' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//strong[contains(string(), '成文日期')]/parent::td[1]//text()").extract()).replace(
        "成文日期：", "").strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//strong[contains(string(), '发文字号')]/parent::td[1]//text()").extract()).replace("发文字号：",
                                                                                                                "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//strong[contains(string(), '索')]/parent::td[1]//text()").extract()).strip()
    if "：" in index_no:
        tmps = index_no.split("：")
        index_no = tmps[1].strip()
    data["index_no"] = cleanSemicolon(index_no)
    ls_str = ''.join(sel.xpath("//strong[contains(string(), '效力状态')]/parent::td[1]//text()").extract()).replace("效力状态：",
                                                                                                                "").strip()
    # validity is encoded in an isok='...' attribute: 0/有效 => valid, 1/失效 => expired
    tmps = re.findall(r"isok='(.*?)'", ls_str)
    legal_status = ""
    if checkExist(tmps):
        if tmps[0] == '0' or tmps[0] == '有效':
            legal_status = "有效"
        elif tmps[0] == '1' or tmps[0] == '失效':
            legal_status = "失效"
    data["legal_status"] = cleanSemicolon(legal_status)
    # try the regular article container first, then the alternate layout
    fulltext_xpath = "//div[@class='article-con']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='viewMainSection']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info (or an empty json object) back onto the task row
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_fgwhenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for fgw.henan.gov.cn policy lists.

    Derives the total page count from the <div id="pageDec"> pager
    attributes, schedules the remaining list pages when handling the first
    page, and queues one article-download task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # ceil(pagecount / pagesize) without floating-point ceil
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid  # NOTE(review): unused in this callback
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # first page: enqueue the follow-up list pages index_<n>.html
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # the site uses two alternative list layouts
        li_list = res.xpath('//div[contains(@class,"news News")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # queue the row under the next stage's task tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://fgw.henan.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'https://fgw.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99406'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_fgwhenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for fgw.henan.gov.cn; nothing to
    post-process at this stage, so hand back an empty DealModel."""
    return DealModel()


def policy_fgwhenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for fgw.henan.gov.cn policy detail pages.

    Builds one policy_latest row and one policy_fulltext_latest row from the
    downloaded html and writes attachment info (or "{}") back onto the
    source row's other_dicts column.

    Raises:
        Exception: when the fulltext container //div[@class='conBox'] is missing.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99406"
    product = "FGWHENAN"
    zt_provider = "fgwhenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # prefer the on-page headline; fall back to the title captured on the list page
    title = sel.xpath("//p[@class='yTit']/text()").get("")
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is a YYYYMMDD string when valid
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # metadata spans carry a "标签：value" prefix that is stripped off below
    organ = ''.join(sel.xpath("//span[contains(string(), '主办单位')]/text()").extract()).replace("主办单位：", "").strip()
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//span[contains(string(), '文号')]/text()").extract()).replace("文号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//span[contains(string(), '索引号')]/text()").extract()).replace("索引号：", "").strip()
    data["index_no"] = cleanSemicolon(index_no)

    fulltext_xpath = "//div[@class='conBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info (or an empty json object) back onto the task row
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_gxthenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gxt.henan.gov.cn policy lists.

    Derives the total page count from the <div id="pageDec"> pager
    attributes, schedules the remaining list pages when handling the first
    page, and queues one article-download task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # ceil(pagecount / pagesize) without floating-point ceil
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid  # NOTE(review): unused in this callback
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # first page: enqueue the follow-up list pages index_<n>.html
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # the site uses two alternative list layouts
        li_list = res.xpath('//div[contains(@class,"news News")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # queue the row under the next stage's task tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://gxt.henan.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'https://gxt.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99407'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_gxthenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for gxt.henan.gov.cn; nothing to
    post-process at this stage, so hand back an empty DealModel."""
    return DealModel()


def policy_gxthenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxt.henan.gov.cn policy detail pages.

    Builds one policy_latest row and one policy_fulltext_latest row from the
    downloaded html and writes attachment info (or "{}") back onto the
    source row's other_dicts column.

    Raises:
        Exception: when the fulltext container //div[@class='conBox'] is missing.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99407"
    product = "GXTHENAN"
    zt_provider = "gxthenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # prefer the on-page headline; fall back to the title captured on the list page
    title = sel.xpath("//p[@class='yTit']/text()").get("")
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    if '...' in title:
        # list titles can be ellipsis-truncated; recover the full title from the meta tag
        title = cleaned(sel.xpath('//meta[@name="ArticleTitle"]/@content').extract_first())
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is a YYYYMMDD string when valid
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # metadata spans carry a "标签：value" prefix that is stripped off below
    organ = ''.join(sel.xpath("//span[contains(string(), '主办单位')]/text()").extract()).replace("主办单位：", "").strip()
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//span[contains(string(), '文号')]/text()").extract()).replace("文号：", "").strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//span[contains(string(), '索引号')]/text()").extract()).replace("索引号：", "").strip()
    data["index_no"] = cleanSemicolon(index_no)

    fulltext_xpath = "//div[@class='conBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info (or an empty json object) back onto the task row
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_kjthenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for kjt.henan.gov.cn policy lists.

    Derives the total page count from the <div id="pageDec"> pager
    attributes, schedules the remaining list pages when handling the first
    page, and queues one article-download task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # ceil(pagecount / pagesize) without floating-point ceil
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid  # NOTE(review): unused in this callback
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # first page: enqueue the follow-up list pages index_<n>.html
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[@class="list_u1"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # queue the row under the next stage's task tag
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://kjt.henan.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'https://kjt.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99408'
            article_json["url"] = url
            # titles here may span nested elements, so join all text nodes
            article_json["title"] = cleanSemicolon(''.join(a_info.xpath(".//text()").extract())).replace("\n",
                                                                                                         "").replace(
                "\t", "")
            pub_date = li.xpath("./span/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_kjthenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for kjt.henan.gov.cn: no list work to queue, so return an empty DealModel."""
    return DealModel()


def policy_kjthenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for kjt.henan.gov.cn article pages.

    Builds the policy metadata row (``policy_latest``) and the full-text row
    (``policy_fulltext_latest``) from the downloaded HTML, then queues an
    update writing any attachment info found inside the full-text node back
    onto the crawl row (``other_dicts``).

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99408"
    # NOTE(review): "KTTHENAN" looks like a typo for "KJTHENAN" (kjt site),
    # but the value may already be persisted downstream -- confirm before changing.
    product = "KTTHENAN"
    zt_provider = "kjthenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the page's own heading(s); fall back to the list-page title.
    title = sel.xpath("//div[@class='title']/text()").get("")
    if not checkExist(title):
        title = sel.xpath("//div[@class='articl_u1']/h1/text()").get("")
    if not checkExist(title):
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only an 8-digit YYYYMMDD pub_date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@class='context']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='articl_u2']"
        fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Explicit message instead of a bare ``raise Exception``.
        raise Exception("fulltext not found: {}".format(article_json['url']))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jythenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jyt.henan.gov.cn (Henan education department).

    On the first page (page_index == 0) it derives the total page count from
    the ``#pageDec`` pagination element and enqueues the remaining list pages
    (``befor_dicts``). It then extracts every article link on the current
    page and enqueues one article task per link (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total records / records per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.html .. index_{N-1}.html).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[contains(@class,"list")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        # Loop-invariant: base for resolving relative hrefs (note: http, not https).
        base_url = 'http://jyt.henan.gov.cn/{}/{}'.format(list_rawid, list_json["page_info"])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            if href.startswith("/"):
                url = 'http://jyt.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99409'
            article_json = {"url": url}
            # Guard against anchors with no direct text node.
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jythenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for jyt.henan.gov.cn: nothing further to queue, so return an empty DealModel."""
    return DealModel()


def policy_jythenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jyt.henan.gov.cn article pages.

    Builds the policy metadata row and full-text row from the downloaded
    HTML, then queues an update writing attachment info back to the crawl row.

    Raises:
        Exception: when the full-text node (``#det``) is missing.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99409"
    product = "JYTHENAN"
    zt_provider = "jythenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading first, list-page title as fallback.
    title = "".join(sel.xpath('//div[@class="article"]/h4//text()').extract()).strip()
    if not checkExist(title):
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only an 8-digit YYYYMMDD pub_date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@id='det']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Explicit message instead of a bare ``raise Exception``.
        raise Exception("fulltext not found: {}".format(article_json['url']))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_mzthenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzt.henan.gov.cn (Henan civil-affairs department).

    On the first page (page_index == 0) it derives the total page count from
    the ``#pageDec`` pagination element and enqueues the remaining list pages
    (``befor_dicts``), then enqueues one article task per link on the current
    page (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total records / records per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.html .. index_{N-1}.html).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[contains(@class,"newsList")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        # Loop-invariant: base for resolving relative hrefs.
        base_url = 'https://mzt.henan.gov.cn/{}/{}'.format(list_rawid, list_json["page_info"])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            if href.startswith("/"):
                url = 'https://mzt.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99410'
            article_json = {"url": url}
            # Guard against anchors with no direct text node.
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_mzthenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for mzt.henan.gov.cn: nothing further to queue, so return an empty DealModel."""
    return DealModel()


def policy_mzthenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzt.henan.gov.cn article pages.

    Builds the policy metadata row and full-text row from the downloaded
    HTML, then queues an update writing attachment info back to the crawl row.

    Raises:
        Exception: when the full-text node (``.conBox``) is missing.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99410"
    product = "MZTHENAN"
    zt_provider = "mzthenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading first, list-page title as fallback.
    title = sel.xpath("//div[@class='titP']/h4/text()").get("")
    if not checkExist(title):
        title = article_json['title']
    if '...' in title:
        # List titles can be ellipsis-truncated; recover the full title from meta.
        title = ''.join(sel.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only an 8-digit YYYYMMDD pub_date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@class='conBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Explicit message instead of a bare ``raise Exception``.
        raise Exception("fulltext not found: {}".format(article_json['url']))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_czthenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czt.henan.gov.cn (Henan finance department).

    On the first page (page_index == 0) it derives the total page count from
    the ``#pageDec`` pagination element and enqueues the remaining list pages
    (``befor_dicts``), then enqueues one article task per link on the current
    page (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total records / records per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.html .. index_{N-1}.html).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"datenews")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        # Loop-invariant: base for resolving relative hrefs.
        base_url = 'https://czt.henan.gov.cn/{}/{}'.format(list_rawid, list_json["page_info"])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            if href.startswith("/"):
                url = 'https://czt.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99411'
            article_json = {"url": url}
            # Guard against anchors with no direct text node.
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_czthenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for czt.henan.gov.cn: nothing further to queue, so return an empty DealModel."""
    return DealModel()


def policy_czthenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for czt.henan.gov.cn article pages.

    Builds the policy metadata row (title, dates, document number, index
    number) and the full-text row from the downloaded HTML, then queues an
    update writing attachment info back to the crawl row.

    Raises:
        Exception: when neither full-text node (``#mainCon`` / ``#content``)
            can be located.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99411"
    product = "CZTHENAN"
    zt_provider = "czthenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the page's own heading(s); fall back to the list-page title.
    title = sel.xpath("//h2[@class='subtitle']/text()").get("")
    if not checkExist(title):
        title = sel.xpath("//h1[@class='newstitle']/text()").get("")
    if not checkExist(title):
        title = article_json['title']
    if '...' in title:
        # List titles can be ellipsis-truncated; recover the full title from meta.
        title = cleaned(sel.xpath('//meta[@name="ArticleTitle"]/@content').extract_first())
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only an 8-digit YYYYMMDD pub_date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Document number and index number live in the metadata table next to
    # their bold labels.
    pub_no = ''.join(sel.xpath("//b[contains(string(), '发文字号')]/following::td[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//b[contains(string(), '索')]/following::td[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)

    fulltext_xpath = "//div[@id='mainCon']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@id='content']"
        fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Explicit message instead of a bare ``raise Exception``.
        raise Exception("fulltext not found: {}".format(article_json['url']))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hrsshenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for hrss.henan.gov.cn (table-based list layout).

    On the first page (page_index == 0) it derives the total page count from
    the ``#pageDec`` pagination element and enqueues the remaining list pages
    (``befor_dicts``), then enqueues one article task per table row on the
    current page (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total records / records per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.html .. index_{N-1}.html).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        tr_list = res.xpath('//td[@class="xin2zuo"]/table/tr')
        # Loop-invariant: base for resolving relative hrefs.
        base_url = 'https://hrss.henan.gov.cn/{}/{}'.format(list_rawid, list_json["page_info"])
        for tr in tr_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            a_info = tr.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            if href.startswith("/"):
                url = 'https://hrss.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99412'
            article_json = {"url": url}
            # Guard against anchors with no direct text node.
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            # Publication date sits in the cell following the link cell.
            pub_date = tr.xpath(".//a/following::td[1]/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hrsshenanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the hrss.henan.gov.cn JSONP list API.

    Parses the ``CallBack({...})`` JSONP payload. On the first page
    (page_index == 1 for this API) it enqueues the remaining pages
    (``befor_dicts``), then enqueues one article task per item in
    ``obj.datas`` (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Strip the JSONP wrapper and parse the embedded JSON document.
        ljson = json.loads(re.findall(r"CallBack\(([\s\S]*?)\)", html)[0])
        total_page = ljson["obj"]["totalPage"]
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                # NOTE(review): the first-page URL uses "pageNumber=1" while
                # later pages use "page=N" -- looks intentional for this API,
                # confirm against the site.
                dic = {"page_info": page_info.replace("pageNumber=1", "page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in ljson["obj"]["datas"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item.get("url", "")
            if not url or 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99412'
            article_json = {"url": url, "title": item.get("title")}
            # Gate pub_date to 8-digit YYYYMMDD, consistent with the sibling
            # list callbacks (downstream ETL derives pub_year from len == 8).
            pub_date = clean_pubdate(item.get("putDate", ""))
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hrsshenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download stage for hrss.henan.gov.cn: nothing further to queue, so return an empty DealModel."""
    return DealModel()


def policy_hrsshenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for hrss.henan.gov.cn article pages.

    Builds the policy metadata row (title, organ, written date, document
    number) and the full-text row from the downloaded HTML, then queues an
    update writing attachment info back to the crawl row.

    Raises:
        Exception: when neither full-text node can be located.
    """
    result = EtlDealModel()
    save_data = list()
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    sel = Selector(down_model["1_1"].html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99412"
    product = "HRSSHENAN"
    zt_provider = "hrsshenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading first, list-page title as fallback.
    title = sel.xpath("//div[@class='title7']/text()").get("")
    if not checkExist(title):
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    # Only an 8-digit YYYYMMDD pub_date yields a pub_year.
    pub_year = pub_date[0:4] if len(pub_date) == 8 else ""
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Metadata fields live next to their bold labels; strip each field's own
    # label. (BUGFIX: written_date and pub_no previously stripped the
    # "发文机关:" label copy-pasted from the organ field, leaving their real
    # labels in the stored values.)
    organ = ''.join(
        sel.xpath("//strong[contains(string(), '发文机关')]/parent::div[1]//text()").extract())
    organ = organ.replace("发文机关:", "").strip()
    if organ.startswith('省'):
        # Site abbreviates provincial organs ("省..."); prefix the province name.
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(
        sel.xpath("//strong[contains(string(), '成文日期')]/parent::div[1]//text()").extract())
    data['written_date'] = clean_pubdate(written_date.replace("成文日期:", "").strip())
    pub_no = ''.join(
        sel.xpath("//strong[contains(string(), '发文字号')]/parent::div[1]//text()").extract())
    data["pub_no"] = cleanSemicolon(pub_no.replace("发文字号:", "").strip())

    fulltext_xpath = "//td[@class='context6']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='title7']/following::tr[1]"
        fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        # Explicit message instead of a bare ``raise Exception``.
        raise Exception("fulltext not found: {}".format(article_json['url']))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (if any) back onto the source crawl row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_nyncthenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for nynct.henan.gov.cn (Henan agriculture department).

    On the first page (page_index == 0) it derives the total page count from
    the ``#pageDec`` pagination element and enqueues the remaining list pages
    (``befor_dicts``), then enqueues one article task per link on the current
    page (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total records / records per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Seed the remaining list pages (index_1.html .. index_{N-1}.html).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page  # "page" column stores the total count
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[contains(@class,"listbox")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        # Loop-invariant: base for resolving relative hrefs.
        base_url = 'https://nynct.henan.gov.cn/{}/{}'.format(list_rawid, list_json["page_info"])
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            if href.startswith("/"):
                url = 'https://nynct.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = article file name without extension; skip if no extension.
            rawid_parts = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_parts:
                continue
            temp["rawid"] = rawid_parts[0]
            temp["sub_db_id"] = '99413'
            article_json = {"url": url}
            # Guard against anchors with no direct text node.
            article_json["title"] = (a_info.xpath('./text()').extract_first() or "").strip()
            pub_date = li.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nyncthenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page hook for nynct.henan.gov.cn; all parsing happens in the
    ETL callback, so this simply returns an empty DealModel."""
    return DealModel()


def policy_nyncthenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for nynct.henan.gov.cn policy articles (sub_db_id 99413).

    Extracts title, dates, issuing organ, document/index numbers and the
    full text from the downloaded article HTML, emits rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables, and writes any
    attachment info back to the source row via ``other_dicts``.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99413"
    product = "NYNCTHENAN"
    zt_provider = "nyncthenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer <h2 class="subtitle">, then <h1 class="title">, then the
    # title captured on the list page. (Removed a leftover debug print here.)
    title = sel.xpath("//h2[@class='subtitle']/text()").get("")
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = sel.xpath("//h1[@class='title']/text()").get("")
        if checkExist(title):
            title = cleanSemicolon(title)
        else:
            title = article_json['title']
    # A truncated on-page title ("...") is replaced by the full value from
    # the ArticleTitle meta tag.
    if '...' in title:
        title = ''.join(sel.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is expected as YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//b[contains(string(), '发文机关')]/following::td[1]//text()").extract()).strip()
    # Organ names starting with "省" lack the province; prefix "河南".
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    written_date = ''.join(sel.xpath("//b[contains(string(), '成文日期')]/following::td[1]//text()").extract()).strip()
    data['written_date'] = clean_pubdate(written_date)
    pub_no = ''.join(sel.xpath("//b[contains(string(), '发文字号')]/following::td[1]//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//b[contains(string(), '索')]/following::td[1]//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    invalid_date = ''.join(sel.xpath("//b[contains(string(), '失效时间')]/following::td[1]//text()").extract()).strip()
    data["invalid_date"] = clean_pubdate(invalid_date)

    fulltext_xpath = "//div[@class='content']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception("fulltext not found via %s for %s" % (fulltext_xpath, data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hnjshenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for hnjs.henan.gov.cn policies (sub_db_id 99414).

    On the first page (page_index == 0) it computes the total page count
    from the pager's @pagesize/@pagecount attributes and schedules the
    remaining list pages (index_<n>.html). Every page's article links are
    packed into ``article_json`` rows tagged for the follow-up article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # ceil(page_count / page_size) without importing math
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@id,"artList")]/li')
        if not checkExist(li_list):
            # Fallback layout used by 政府信息公开 sections.
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows belong to the next task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://hnjs.henan.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
            if href.startswith("/"):
                url = 'https://hnjs.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name without extension. Fixed: raw string so "\."
            # is a proper regex escape, not an invalid str escape sequence.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99414'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./span[@class='time']/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # keep only well-formed YYYYMMDD dates
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hnjshenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page hook for hnjs.henan.gov.cn; all parsing happens in the
    ETL callback, so this simply returns an empty DealModel."""
    return DealModel()


def policy_hnjshenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for hnjs.henan.gov.cn policy articles (sub_db_id 99414).

    Extracts title and full text from the downloaded article HTML, emits
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info back to the source row via ``other_dicts``.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99414"
    product = "HNJSHENAN"
    zt_provider = "hnjshenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, else the list-page capture.
    title = ''.join(sel.xpath("//div[@class='title']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is expected as YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@class='news-info']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception("fulltext not found via %s for %s" % (fulltext_xpath, data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_wsjkwhenanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wsjkw.henan.gov.cn policies (sub_db_id 99415).

    On the first page (page_index == 0) it computes the total page count
    from the pager's @pagesize/@pagecount attributes and schedules the
    remaining list pages (index_<n>.html). Every page's article links are
    packed into ``article_json`` rows tagged for the follow-up article task.

    Raises:
        Exception: when a list item has no parsable YYYYMMDD publish date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # ceil(page_count / page_size) without importing math
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"listmain")]/li')
        if not checkExist(li_list):
            # Fallback layout used by 政府信息公开 sections.
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows belong to the next task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://wsjkw.henan.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                  list_json["page_info"])
            if href.startswith("/"):
                url = 'https://wsjkw.henan.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name without extension. Fixed: raw string so "\."
            # is a proper regex escape, not an invalid str escape sequence.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99415'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./b/text()")
            if not checkExist(pub_date):
                pub_date = clean_pubdate(''.join(li.xpath("./text()").extract()).strip())
            else:
                pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # a YYYYMMDD date is mandatory here
                raise Exception("bad pub_date %r for %s" % (pub_date, url))
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_wsjkwhenanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page hook for wsjkw.henan.gov.cn; all parsing happens in the
    ETL callback, so this simply returns an empty DealModel."""
    return DealModel()


def policy_wsjkwhenanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wsjkw.henan.gov.cn policy articles (sub_db_id 99415).

    Extracts title and full text from the downloaded article HTML, emits
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info back to the source row via ``other_dicts``.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99415"
    product = "WSJKWHENAN"
    zt_provider = "wsjkwhenangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, else the list-page capture.
    title = ''.join(sel.xpath("//td[@class='title']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:  # pub_date is expected as YYYYMMDD
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    # Two known article layouts use either an id or class "artibody".
    fulltext_xpath = '//div[@id="artibody"]|//div[@class="artibody"]'
    fulltext = sel.xpath(fulltext_xpath).extract_first()
    if not checkExist(fulltext):
        raise Exception("fulltext not found via %s for %s" % (fulltext_xpath, data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_zhengzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for public.zhengzhou.gov.cn policies (sub_db_id 99416).

    On the first page (page_index == 1) it reads the total page count from
    the pager links and schedules the remaining list pages (each reusing
    the same list_json). Every page's article links are packed into
    ``article_json`` rows tagged for the follow-up article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Second-to-last pager anchor holds the last page number.
            max_count = res.xpath('//div[@class="page-tile"]/a[last()-1]/text()').extract_first()
            max_count = int(max_count) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Every page reuses the original list_json unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//ul[@class="page-list"]/a')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows belong to the next task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('@href').extract_first()
            base_url = 'https://public.zhengzhou.gov.cn/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name without extension. Fixed: raw string so "\."
            # is a proper regex escape, not an invalid str escape sequence.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99416'
            article_json["url"] = url
            article_json["title"] = li.xpath('span/text()').extract_first().strip()
            # Directory-style listings ("a=dir") carry no publish date.
            if 'a=dir' in list_rawid:
                article_json["pub_date"] = ''
            else:
                article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_zhengzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page hook for public.zhengzhou.gov.cn; all parsing happens in
    the ETL callback, so this simply returns an empty DealModel."""
    return DealModel()


def policy_zhengzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for public.zhengzhou.gov.cn policy articles (sub_db_id 99416).

    Extracts title, dates, organ, document/index numbers, legal status and
    full text from the downloaded article HTML, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info back to the source row via ``other_dicts``.

    Raises:
        Exception: when no publish date is found or the fulltext container
            cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99416"
    product = "ZHENGZHOU"
    zt_provider = "zhengzhougovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: try two on-page layouts, then fall back to the list capture.
    title = ''.join(sel.xpath("//div[@class='content-title']//text()").extract()).strip()
    if not title:
        title = ''.join(sel.xpath("//div[@class='content-wrap']/div[@class='multiline-title']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # Publish date: list capture first, then the on-page "发布日期" field.
    pub_date = clean_pubdate(article_json.get("pub_date", ""))
    pub_year = pub_date[0:4]
    if not pub_date:
        pub_date_info = ''.join(sel.xpath("//i[contains(@title, '发布日期')]/following::em[1]//text()").extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[0:4]
    if not pub_date:
        raise Exception("no pub_date for %s" % data["provider_url"])
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//i[contains(@title, '发布机构')]/following::em[1]//text()").extract()).strip()
    # Organ names starting with "省" lack the province; prefix "河南".
    if organ.startswith('省'):
        organ = '河南' + organ
    if len(organ) > 0:
        data["organ"] = organ
    else:
        data["organ"] = article_json.get("organ", "")
    written_date = clean_pubdate(
        ''.join(sel.xpath("//i[contains(@title, '成文日期')]/following::em[1]//text()").extract()).strip())
    if len(written_date) > 0:
        data["written_date"] = written_date
    else:
        data["written_date"] = article_json.get("written_date", "")
    pub_no = ''.join(sel.xpath("//i[contains(@title, '文号')]/following::em[1]//text()").extract()).strip()
    if len(pub_no) > 0:
        data["pub_no"] = pub_no
    else:
        data["pub_no"] = article_json.get("pub_no", "")
    # Fixed: the index number previously overwrote data["pub_no"]; it
    # belongs in data["index_no"] like the sibling ETL callbacks.
    index_no = ''.join(sel.xpath("//i[contains(@title, '索引号')]/following::em[1]//text()").extract()).strip()
    if len(index_no) > 0:
        data["index_no"] = index_no
    else:
        data["index_no"] = article_json.get("index_no", "")
    legal_status = ''.join(sel.xpath("//i[contains(@title, '有效性')]/following::em[1]//text()").extract()).strip()
    if len(legal_status) > 0:
        data["legal_status"] = legal_status
    else:
        data["legal_status"] = article_json.get("legal_status", "")

    fulltext_xpath = "//div[contains(@class,'content-txt')]"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception("fulltext not found via %s for %s" % (fulltext_xpath, data["provider_url"]))
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_kaifenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.kaifeng.gov.cn policies (sub_db_id 99417).

    The total page count is scraped from the "共N页" pager text. On the
    first page (page_index == 0) the per-page offset step is read from the
    "下一页" link and the remaining list pages are scheduled with their
    ``offset`` values. Every page's article rows are packed into
    ``article_json`` entries tagged for the follow-up article task.

    Raises:
        Exception: when a list row has no parsable YYYYMMDD publish date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        total_page = re.findall(r";共(\d+)页&", html)
        if checkExist(total_page):
            total_page = int(total_page[0])
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Offset step per page, taken from the "next page" link.
            page_size = int(re.findall(r"offset=(\d+)&", res.xpath("//a[contains(text(),'下一页')]/@href").get(""))[0])
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            tmp_json = copy.deepcopy(list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                tmp_json["offset"] = page * page_size
                sql_dict["list_json"] = json.dumps(tmp_json, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Data rows contain a "[" (date bracket); the record-count row does not.
        tr_list = res.xpath(
            "//td[@class='xin2zuo' and not (contains(string(),'条记录'))]/table/tr[contains(string(),'[')]")
        for tr in tr_list:
            temp = info_dicts.copy()
            # Article rows belong to the next task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = tr.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.kaifeng.gov.cn/viewCmsCac.do'
            if href.startswith("/"):
                url = 'https://www.kaifeng.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = file name without extension. Fixed: raw string so "\."
            # is a proper regex escape, not an invalid str escape sequence.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99417'
            article_json["url"] = url.strip()
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = tr.xpath('./td[@align="right"]/text()')
            if checkExist(pub_date):
                pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:  # a YYYYMMDD date is mandatory here
                raise Exception("bad pub_date %r for %s" % (pub_date, url))
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_kaifengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page hook for www.kaifeng.gov.cn; all parsing happens in the
    ETL callback, so this simply returns an empty DealModel."""
    return DealModel()


def policy_kaifengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.kaifeng.gov.cn policy articles (sub_db_id 99417).

    Extracts title, publish date (via several page-layout fallbacks) and
    full text from the downloaded article HTML, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info back to the source row via ``other_dicts``.

    Raises:
        Exception: when no publish date can be extracted or the fulltext
            container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99417"
    product = "KAIFENG"
    zt_provider = "kaifenggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, else the list-page capture.
    title = ''.join(sel.xpath("//td[@class='title7']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # pub_date = article_json.get("pub_date", "")
    # Date extraction, layout 1: a cell like "发布时间：<date> 字体".
    tmp_date = "".join(sel.xpath("//td[contains(string(),'发布时间')]/text()").extract()).replace("\n","").replace("\r","").replace("\t","")
    if checkExist(tmp_date.strip()):
        tmp_date = re.findall(r"发布时间：(.*?)\s*字体", tmp_date)
        if not checkExist(tmp_date):
            # Diagnostic dump before failing: no date matched in layout 1.
            print(tmp_date,data["provider_url"],"".join(sel.xpath("//td[contains(string(),'时间')]/text()").extract()).replace("\n","").replace("\r","").replace("\t",""))
            raise Exception
    else:
        # Layout 2: "时间：<date> 浏览" or "时间：<date>;" (";" appended as a
        # sentinel so the second pattern can match at end of string).
        str_date = "".join(sel.xpath("//td[contains(string(),'时间')]/text()").extract()).replace("\n", "").replace("\r", "").replace("\t", "") + ";"
        tmp_date = re.findall(r"时间：(.*?)\s*浏览",str_date )
        if not checkExist(tmp_date):
            tmp_date = re.findall(r"时间：(.*?)\s*;", str_date)
        if not checkExist(tmp_date):
            # Diagnostic dump before failing: no date matched in layout 2.
            print(tmp_date,"".join(sel.xpath("//td[contains(string(),'时间')]/text()").extract()).replace("\n","").replace("\r","").replace("\t",""))
            raise Exception
    pub_year = ""
    pub_date = clean_pubdate(tmp_date[0])
    # Only derive the year when the cleaned date validates.
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//td[@class='kfHtmlFont']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_lylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.ly.gov.cn policies (sub_db_id 99418).

    The total page count comes from the inline ``zys = N`` script variable.
    On the first page (page_index == 1) the remaining list pages
    (list-<n>.html) are scheduled. Two list layouts are supported:
    ``headlines_today_list`` <ul> items and ``list-unstyled`` table rows;
    in both, each article link is packed into an ``article_json`` row
    tagged for the follow-up article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        total_page = re.findall(r"zys = (\d+)", html)
        if checkExist(total_page):
            total_page = int(total_page[0])
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "list-{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"headlines_today_list")]/li')
        if checkExist(li_list):
            # Layout 1: plain <ul> list with a trailing <span> date.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = li.xpath('.//a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.ly.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid, list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.ly.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                # rawid = file name without extension. Fixed: raw string so
                # "\." is a proper regex escape, not an invalid str escape.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99418'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                pub_date = li.xpath("./span/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                # Keep only plausible YYYYMMDD dates (19xx/20xx).
                if len(pub_date) != 8 or (not pub_date.startswith("20") and not pub_date.startswith("19")):
                    pub_date = ""
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 2: table rows inside a "list-unstyled" <ul>.
            tr_list = res.xpath('//ul[contains(@class,"list-unstyled")]/li/table/tr')
            if checkExist(tr_list):
                for tr in tr_list:
                    temp = info_dicts.copy()
                    temp["task_tag"] = temp["task_tag_next"]
                    del temp["task_tag_next"]
                    article_json = dict()
                    a_info = tr.xpath('.//p/a')
                    if not checkExist(a_info):
                        continue
                    href = a_info.xpath('./@href').get("")
                    base_url = r'http://www.ly.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                    list_json["page_info"])
                    if href.startswith("/"):
                        url = 'http://www.ly.gov.cn{}'.format(href)
                    else:
                        url = parse.urljoin(base_url, href)
                    if 'html' not in url:
                        continue
                    # Same raw-string regex fix as the first layout branch.
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                    temp["rawid"] = rawid
                    temp["sub_db_id"] = '99418'
                    article_json["url"] = url
                    article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                    pub_date = tr.xpath(".//span/text()")

                    if checkExist(pub_date):
                        pub_date = clean_pubdate(pub_date.extract_first())
                    # Keep only plausible YYYYMMDD dates (19xx/20xx).
                    if len(pub_date) != 8 or (not pub_date.startswith("20") and not pub_date.startswith("19")):
                        pub_date = ""
                    article_json["pub_date"] = pub_date
                    temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                    di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_lyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for ly.gov.cn: no extra work to schedule."""
    return DealModel()


def policy_lyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an ly.gov.cn (Luoyang) policy article page into rows for the
    policy_latest / policy_fulltext_latest tables.

    Reads the downloaded HTML from ``callmodel.down_model.down_dict["1_1"]``,
    extracts title / pub_date / organ / pub_no / index_no / fulltext, and
    returns an EtlDealModel carrying the rows to save plus an update that
    writes any attachment info back onto the source row's ``other_dicts``.

    Raises:
        Exception: when no valid 8-digit publish date or no fulltext node
            can be found on the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was captured at list-parse time (url / title / pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for the Luoyang (ly.gov.cn) policy source.
    sub_db_id = "99418"
    product = "LY"
    zt_provider = "lygovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, then an <ArticleTitle> node, then
    # fall back to the title captured at list time.
    title = ''.join(sel.xpath("//ul[contains(@class,'suggestions_content_top')]/li/h2/text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = ''.join(sel.xpath("//ArticleTitle/text()").extract()).strip()
        if checkExist(title):
            title = cleanSemicolon(title)
        else:
            title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) != 8:
        # No usable list-time date: read it from the page's date element.
        # NOTE(review): extract_first() may return None if #rqLy is absent,
        # which would raise AttributeError here — confirm the element is
        # always present on these pages.
        tmp_date = sel.xpath("//li[@id='rqLy']/text()").extract_first().replace("日期：", "").strip()
        if len(tmp_date) > 0:
            pub_date = clean_pubdate(tmp_date)
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        # A valid yyyymmdd date is mandatory for this source.
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//span[contains(string(),'发文机关')]/following::span[1]/text()").extract()).strip()
    # Province-relative organ names get the province prefix.
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//span[contains(string(),'文件编号')]/following::span[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//span[contains(string(),'索引号')]/following::span[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)

    # Fulltext container differs between page templates; try both.
    fulltext_xpath = "//div[@class='mailbox_content_wznrs']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//li[@class='wzxqnr']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_pdslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a pds.gov.cn (Pingdingshan) policy list page.

    On page 1, discovers the total page count from the pager text and
    schedules the remaining list pages (``befor_dicts``). For every page,
    extracts article links into ``article_json`` rows queued under the next
    task tag (``next_dicts``).

    Fixes applied: raw strings for the regex patterns (non-raw ``\\d`` /
    ``\\.`` emit SyntaxWarning on modern Python) and ``.get("")`` instead of
    ``extract_first()`` for the link title so an empty text node yields ""
    rather than raising AttributeError on ``None.strip()``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Pager text looks like "...a>1/NN..."; NN is the page count.
            max_count = re.findall(r"a>1/(\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            tmp_list_json = copy.deepcopy(list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(tmp_list_json, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath("//ul/li")
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows are queued under the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            # Skip rows without a link and the table-header row ('标题').
            if not checkExist(a_info) or a_info.xpath('./text()').get("").strip() == '标题':
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.pds.gov.cn/{}.html'.format(callmodel.sql_model.list_rawid)
            if href.startswith("/"):
                url = 'https://www.pds.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99419'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').get("").strip()
            # A few sections carry the date in the 4th span of the row.
            if callmodel.sql_model.list_rawid in ['31698', '31699', '31700', '31701']:
                pub_date = li.xpath("./span[4]/text()")
            else:
                pub_date = li.xpath("./span/text()")
            if checkExist(pub_date):
                pub_date = clean_pubdate(pub_date.extract_first())
            else:
                pub_date = ""
            # Keep only well-formed yyyymmdd dates in the 19xx/20xx range.
            if len(pub_date) != 8 or (not pub_date.startswith("20") and not pub_date.startswith("19")):
                pub_date = ""
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_pdsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for pds.gov.cn: no extra work to schedule."""
    return DealModel()


def policy_pdsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a pds.gov.cn (Pingdingshan) policy article page into rows for the
    policy_latest / policy_fulltext_latest tables.

    Bug fix: the diagnostic print on the invalid-date path referenced
    ``data["providerurl"]`` while the key set above is ``"provider_url"``,
    so it raised KeyError instead of printing and raising the intended
    Exception.

    Raises:
        Exception: when no valid publish date or no fulltext node is found.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was captured at list-parse time (url / title / pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for the Pingdingshan (pds.gov.cn) policy source.
    sub_db_id = "99419"
    product = "PDS"
    zt_provider = "pdsgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    title = sel.xpath("//div[@class='art-title']/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        # Fall back to the title captured at list time.
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_year = ""
    # The publish date is always taken from the page itself for this source.
    pub_date = clean_pubdate(sel.xpath("//span[contains(string(), '发布日期')]/parent::span[1]/text()").get(""))
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    else:
        print(data["provider_url"], pub_date)  # fixed key: was "providerurl" (KeyError)
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    pub_no = ''.join(sel.xpath("//li[contains(string(), '发文字号')]/following::li[1]//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//li[contains(string(), '索引号')]/following::li[1]//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    legal_status = ''.join(sel.xpath("//li[contains(string(), '服务对象')]/following::li[2]//text()").extract()).strip()
    data["legal_status"] = cleanSemicolon(legal_status)

    # Fulltext container differs between page templates; try both.
    fulltext_xpath = "//div[@class='article clearfloat']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='article']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_anyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an anyang.gov.cn policy list page.

    On the first page (page_index == 0 for this source), computes the total
    page count from the pager div's pagesize/pagecount attributes and
    schedules the remaining pages (befor_dicts). For every page, queues the
    article links under the next task tag (next_dicts).

    Raises:
        Exception: when a list row yields a pub_date that is not yyyymmdd.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total items / items per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): the exclusive upper bound (total_page, not
            # total_page + 1) differs from sibling callbacks — presumably
            # because follow-up pages are named index_1.html .. index_{n-1}.html
            # while page 0 is the entry page; confirm against the site.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "index_{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # List layout varies between sections; try both containers.
        li_list = res.xpath('//ul[contains(@class,"newsList")]/li')
        if not checkExist(li_list):
            li_list = res.xpath('//div[contains(@class,"zfxxgk_zdgkc")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows are queued under the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.anyang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                 list_json["page_info"])
            if href.startswith("/"):
                url = 'https://www.anyang.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99420'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./b/text()")
            if not checkExist(pub_date):
                # NOTE(review): .extract() returns a *list* here, whereas the
                # branch below (and sibling callbacks) pass extract_first();
                # verify clean_pubdate accepts a list, else this should be
                # extract_first().
                pub_date = clean_pubdate(li.xpath("./span/text()").extract())
            else:
                pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                raise Exception
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_anyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for anyang.gov.cn: no extra work to schedule."""
    return DealModel()


def policy_anyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an anyang.gov.cn policy article page into rows for the
    policy_latest / policy_fulltext_latest tables.

    Unlike sibling ETL callbacks, a missing/invalid pub_date does NOT raise
    here — pub_date and pub_year are simply left empty.

    Raises:
        Exception: when the fulltext container is not found on the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was captured at list-parse time (url / title / pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for the Anyang (anyang.gov.cn) policy source.
    sub_db_id = "99420"
    product = "ANYANG"
    zt_provider = "anyanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, fall back to the list-time title.
    title = ''.join(sel.xpath("//div[@class='titP']/p//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//b[contains(string(), '发文机关')]/following::td[1]/text()").extract()).strip()
    # Province-relative organ names get the province prefix.
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = organ
    pub_no = ''.join(sel.xpath("//b[contains(string(), '发文字号')]/following::td[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//b[contains(string(), '索')]/following::td[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    invalid_date = ''.join(sel.xpath("//b[contains(string(), '失效时间')]/following::td[1]//text()").extract()).strip()
    data["invalid_date"] = clean_pubdate(invalid_date)

    fulltext_xpath = "//div[@class='conBox']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hebilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a hebi.gov.cn policy list page (ul-of-li or table layout).

    On page 1, reads the pager's rows/count values out of the embedded
    markup (two possible encodings) to compute the total page count and
    schedules the remaining pages (``befor_dicts``), rewriting the
    URL-encoded pageNo token in page_info. Every page's article links are
    queued under the next task tag (``next_dicts``).

    Fixes applied: removed a leftover debug ``print(rawid)``; raw strings
    for the ``(.*?)\\.`` regex (non-raw ``\\.`` emits SyntaxWarning on
    modern Python); ``.get("")`` instead of ``extract_first()`` for the
    table-row title so an empty text node yields "" rather than raising
    AttributeError on ``None.strip()``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['data']["html"]
        res = Selector(text=html)
        # Pager metadata appears either as escaped attributes or as quoted
        # dict entries depending on the page template; try both.
        page_size = re.findall(r'rows=\\"(\d+)\\"', html)
        page_count = re.findall(r'count=\\"(\d+)\\"', html)
        if len(page_size) == 0 or len(page_count) == 0:
            page_size = re.findall(r"rows':'(\d+)'", html)
            page_count = re.findall(r"count':'(\d+)'", html)
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total items / items per page.
            total_page = int((int(page_count[0]) + int(page_size[0]) - 1) / int(page_size[0]))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Rewrite the URL-encoded `pageNo":1,"` token for this page.
                sql_dict["list_json"] = json.dumps(
                    {"page_info": page_info.replace("pageNo%22%3A1%2C%22", "pageNo%22%3A{}%2C%22".format(page))},
                    ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if "国民经济和社会发展规划" in html:
            li_list = res.xpath("//label[contains(string(),'国民经济和社会发展规划')]/following::div[1]/ul/li")
        else:
            li_list = res.xpath("//div[@class='page-content']/ul/li")
        if checkExist(li_list):
            for li in li_list:
                temp = info_dicts.copy()
                # Article rows are queued under the *next* task tag.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = li.xpath('.//a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'https://www.hebi.gov.cn/{}.html'.format(callmodel.sql_model.list_rawid)
                if href.startswith("/"):
                    url = 'https://www.hebi.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                # rawid is the article file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99421'
                article_json["url"] = url
                article_json["title"] = cleanSemicolon("".join(a_info.xpath('./text()').extract()))
                pub_date = li.xpath("./span/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                else:
                    pub_date = ""
                # Keep only well-formed yyyymmdd dates in the 19xx/20xx range.
                if len(pub_date) != 8 or (not pub_date.startswith("20") and not pub_date.startswith("19")):
                    pub_date = ""
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Fallback: some sections render the list as a table.
            tr_list = res.xpath("//div[@class='page-content']/table/tr")
            for tr in tr_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = tr.xpath('.//a')
                # Skip rows without a link and the table-header row ('标题').
                if not checkExist(a_info) or a_info.xpath('./text()').get("").strip() == '标题':
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'https://www.hebi.gov.cn/{}.html'.format(callmodel.sql_model.list_rawid)
                if href.startswith("/"):
                    url = 'https://www.hebi.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99421'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').get("").strip()
                pub_date = tr.xpath("./td[contains(@class,'zfhy_fwrq')]/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                else:
                    pub_date = ""
                if len(pub_date) != 8 or (not pub_date.startswith("20") and not pub_date.startswith("19")):
                    pub_date = ""
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hebiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for hebi.gov.cn: no extra work to schedule."""
    return DealModel()


def policy_hebiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a hebi.gov.cn policy article page into rows for the
    policy_latest / policy_fulltext_latest tables.

    Raises:
        Exception: when no valid 8-digit publish date or no fulltext node
            can be found on the page.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was captured at list-parse time (url / title / pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for the Hebi (hebi.gov.cn) policy source.
    sub_db_id = "99421"
    product = "HEBI"
    zt_provider = "hebigovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, fall back to the list-time title.
    title = sel.xpath("//h1[contains(@class,'title')]/text()")
    if checkExist(title):
        title = ''.join(title.extract())
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if len(pub_date) != 8:
        # No usable list-time date: read it from the page's metadata table.
        pub_date = clean_pubdate(
            ''.join(sel.xpath("//li[contains(string(), '发布日期')]/following::li[1]//text()").extract()))
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    else:
        # A valid yyyymmdd date is mandatory for this source.
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    pub_no = ''.join(sel.xpath("//span[contains(string(), '发文字号')]/following::span[1]//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//span[contains(string(), '信息索引号')]/following::span[1]//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)

    fulltext_xpath = "//div[@class='zoom']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_xinxianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a xinxiang.gov.cn policy list page.

    On page 1, takes the total page count from the companion "1_2"
    download's endPage value and schedules the remaining pages
    (befor_dicts). For every page, queues the article links under the next
    task tag (next_dicts).

    Raises:
        Exception: when a list row yields a pub_date that is not yyyymmdd.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count comes from the secondary ("1_2") request's payload.
        total_page = para_dicts["data"]["1_2"]["endPage"]
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "list{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        div_list = res.xpath('//div[contains(@class,"hap_infoOne")]')
        for div in div_list:
            temp = info_dicts.copy()
            # Article rows are queued under the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = div.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.xinxiang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                  list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.xinxiang.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99422'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = div.xpath("./span[@class='hap_infoDate']/text()")
            if checkExist(pub_date):
                pub_date = clean_pubdate(pub_date.extract_first())
            # NOTE(review): when the date span is absent, pub_date is still a
            # SelectorList here; len() of an empty SelectorList is 0, so the
            # check below raises — seems intended as "date is mandatory".
            if len(pub_date) != 8:
                raise Exception
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_xinxiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xinxiang.gov.cn: no extra work to schedule."""
    return DealModel()


def policy_xinxiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a xinxiang.gov.cn policy article page into rows for the
    policy_latest / policy_fulltext_latest tables.

    Raises:
        Exception: when no valid publish date or no fulltext node is found.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was captured at list-parse time (url / title / pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for the Xinxiang (xinxiang.gov.cn) policy source.
    sub_db_id = "99422"
    product = "XINXIANG"
    zt_provider = "xinxianggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: prefer the on-page heading, fall back to the list-time title.
    title = ''.join(sel.xpath("//div[@class='hap_xq_tit']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_year = ""
    # The publish date is the span immediately before the visit counter.
    pub_date = clean_pubdate(
        sel.xpath("//div[@class='hap_xq_data']/span[@id='visitCountArea']/preceding::span[1]/text()").get(""))
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    else:
        # A valid publish date is mandatory for this source.
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//th[contains(string(), '发文机关')]/following::td[1]/text()").extract()).strip()
    # Province-relative organ names get the province prefix.
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = organ
    pub_no = ''.join(sel.xpath("//th[contains(string(), '发文字号')]/following::td[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//th[contains(string(), '索')]/following::td[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    invalid_date = ''.join(sel.xpath("//th[contains(string(), '失效时间')]/following::td[1]//text()").extract()).strip()
    data["invalid_date"] = clean_pubdate(invalid_date)
    written_date = ''.join(sel.xpath("//th[contains(string(), '成文日期')]/following::td[1]//text()").extract()).strip()
    data["written_date"] = clean_pubdate(written_date)

    fulltext_xpath = "//div[contains(@class,'article-detail')]"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jiaozuolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Jiaozuo gov (www.jiaozuo.gov.cn) policy list page.

    On page 1 it fans out download tasks for the remaining list pages
    (``list2.html`` .. ``listN.html``); for every article link it queues a
    next-stage task whose ``article_json`` carries url/title/pub_date.

    :param callmodel: platform callback context (downloaded html in
        ``para_dicts["data"]["1_1"]``, paging info in ``"1_2"``).
    :returns: DealModel with page fan-out inserts and article-task inserts.
    :raises Exception: when a cleaned publish date is not 8 chars (YYYYMMDD).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total page count is supplied by the companion "1_2" request.
        total_page = para_dicts["data"]["1_2"]["endPage"]
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule pages 2..total_page for download.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "list{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Two known list layouts; fall back to the second when the first is absent.
        div_list = res.xpath('//div[contains(@class,"colRightOne")]')
        if not checkExist(div_list):
            div_list = res.xpath('//div[contains(@class,"govdocOne")]')
        for div in div_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = div.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.jiaozuo.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                 list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.jiaozuo.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = filename without extension; raw string so '\.' is a
            # literal dot (non-raw '\.' is an invalid escape on Python 3.12+).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99423'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = div.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = div.xpath(".//span[@class='colInfoDate']/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                raise Exception
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jiaozuoarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: returns an empty DealModel (extraction happens in ETL)."""
    return DealModel()


def policy_jiaozuoarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a downloaded Jiaozuo policy article page.

    Extracts title/organ/pub_no/written_date and the article full text, then
    emits rows for ``policy_latest`` and ``policy_fulltext_latest``; any
    attachment info found in the full text is written back onto the source
    row via an ``other_dicts`` update.

    :raises Exception: when no full-text node matches ``fulltext_xpath``.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was populated by the list callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99423"
    product = "JIAOZUO"
    zt_provider = "jiaozuogovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title fallback chain: page heading -> metadata table row -> list-page title.
    title = ''.join(sel.xpath("//div[@class='cms-article-tit']//text()").extract()).strip()
    if len(title) > 0:
        title = cleanSemicolon(title)
    else:
        title = ''.join(sel.xpath(
            "//span[contains(string(), '标') and contains(string(), '题')]/following::span[1]/text()").extract()).strip()
        if len(title) > 0:
            title = cleanSemicolon(title)
        else:
            title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    # pub_date is expected to be YYYYMMDD when present.
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//span[contains(string(), '发布机关')]/following::span[1]/text()").extract()).strip()
    # Organs like "省..." are prefixed with the province name (Henan).
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//span[contains(string(), '发文字号')]/following::span[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    written_date = ''.join(sel.xpath("//th[contains(string(), '成文日期')]/following::td[1]//text()").extract()).strip()
    data["written_date"] = clean_pubdate(written_date)

    fulltext_xpath = "//div[contains(@class,'article-detail')]"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_puyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Puyang gov (www.puyang.gov.cn) policy list page.

    On page 1 it fans out download tasks for pages 2..N (total page count is
    read from the "尾页" pagination link); each article link found becomes a
    next-stage task with url/title/pub_date in ``article_json``. Two list
    layouts are handled: plain ``<li>`` lists and the search-result layout.

    Fix: removed a leftover debug ``print(url, rawid)``.

    :raises Exception: when a rawid is empty or a cleaned publish date is
        not 8 chars (YYYYMMDD).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # "尾页" (last page) link carries the total page count as ?page=N.
        href_page = res.xpath("//a[contains(string(),'尾') and contains(string(),'页')]/@href")
        if checkExist(href_page):
            total_page = int(re.findall(r"page=(\d+)", href_page.get(""))[0])
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("page=1", "page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//div[contains(@class,"jrpy_list-box")]/ul/li')
        if not checkExist(li_list):
            li_list = res.xpath('//ul[contains(@class,"news_list")]/li')
        if checkExist(li_list):
            # Layout 1: plain <li> article list.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = li.xpath('.//a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.puyang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                    list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.puyang.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url and 'asp' not in url:
                    continue
                # rawid: filename stem for .html pages, id= query value otherwise.
                if "html" in url:
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                else:
                    rawid = re.findall(r'id=(.*)', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99424'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                pub_date = li.xpath("./span/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                if len(pub_date) != 8:
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 2: search-result style list.
            div_list = res.xpath('//div[contains(@class,"jsearch-result-box")]')
            for div in div_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = div.xpath("./div[@class='jsearch-result-title']//a")
                if not checkExist(a_info):
                    continue

                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.puyang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                    list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.puyang.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)

                if 'html' not in url and 'asp' not in url:
                    continue
                if "html" in url:
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                else:
                    rawid = re.findall(r'id=(.*)', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99424'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                pub_date = div.xpath(".//span[@class='jsearch-result-date']/text()")
                pub_date = clean_pubdate(pub_date.extract_first())
                if len(pub_date) != 8:
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_puyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: returns an empty DealModel (extraction happens in ETL)."""
    return DealModel()


def policy_puyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a downloaded Puyang policy article page.

    Extracts title/organ/pub_no/index_no and the full text, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back onto the source row via ``other_dicts``.

    Fix: the "发布机构：" regex is now a raw string — a non-raw ``'\\s'`` is an
    invalid escape sequence (SyntaxWarning on Python 3.12+).

    :raises Exception: when no full-text node is found.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was populated by the list callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99424"
    product = "PUYANG"
    zt_provider = "puyanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page <h1>, falling back to the title captured on the list page.
    title = ''.join(sel.xpath("//div[@class='details-main']/h1//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    # pub_date is expected to be YYYYMMDD when present.
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//b[contains(string(), '发文机关')]/following::td[1]/text()").extract()).strip()
    if not organ:
        # Secondary layout stores the organ inline as "发布机构：X ".
        organ_info = ''.join(sel.xpath('//em/font[contains(text(),"发布机构：")]/text()').extract()).strip()
        organ_info = re.findall(r'发布机构：(.*?)\s', organ_info)
        organ = organ_info[0].strip() if organ_info else ''
    # Prefix city/province names for organs given as "市..."/"省...".
    if organ.startswith('市'):
        organ = '濮阳' + organ
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//b[contains(string(), '发文字号')]/following::td[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//b[contains(string(), '索')]/following::td[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)

    # Two known full-text containers; try the alternate before giving up.
    fulltext_xpath = '//div[@class="content"]|//div[@id="changesize"]'
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='show-nr-b ft-zhengfu']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_xuchanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Xuchang gov (www.xuchang.gov.cn) policy list page.

    On page 1 it fans out download tasks for pages 2..N (total page count is
    read from the "尾页"/"末页" pagination link); each article becomes a
    next-stage task. Two layouts: static ``<li>`` lists (first task_tag_next)
    and a dynamic table whose rows carry the article id in an ``onclick``
    ``linkToNew(...)`` call (second task_tag_next).

    Fix: ``rawid`` is now reset on every table row — previously a row whose
    ``onclick`` did not match raised NameError on the first iteration or
    silently reused the previous row's rawid on later ones.

    :raises Exception: when rawid cannot be extracted or a cleaned publish
        date is not 8 chars (YYYYMMDD).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Last-page link ("尾页" or "末页") encodes the page count as /N.html.
        href_page = res.xpath("//a[contains(string(),'尾') and contains(string(),'页')]/@href")
        if not checkExist(href_page):
            href_page = res.xpath("//a[contains(string(),'末') and contains(string(),'页')]/@href")
        if checkExist(href_page):
            total_page = int(re.findall(r"\/(\d+).html", href_page.get(""))[0])
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath('//ul[contains(@class,"ewb-info-list")]/li')
        if checkExist(li_list):
            # Layout 1: static article list -> first task_tag in task_tag_next.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"].split(";")[0]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = li.xpath('.//a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.xuchang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                     list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.xuchang.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                if "html" in url:
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99425'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                pub_date = li.xpath("./span/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                if len(pub_date) != 8:
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 2: dynamic table -> second task_tag in task_tag_next.
            tr_list = res.xpath('//div[contains(@class,"table-responsive")]/table/tbody/tr')
            for tr in tr_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"].split(";")[1]
                del temp["task_tag_next"]
                article_json = dict()

                a_info = tr.xpath(".//a[@class='ewb-infoname']")
                if not checkExist(a_info):
                    continue
                # Reset per row so a non-matching onclick raises Exception
                # below instead of NameError / reusing the previous rawid.
                rawid = ""
                page_info = a_info.xpath("./@onclick").extract_first()
                if page_info:
                    tmps = re.findall(r'linkToNew\((.*?)\);', page_info)
                    if checkExist(tmps):
                        rawid = tmps[0].split(",")[0].replace("'", "")
                if not checkExist(rawid):
                    raise Exception
                url = 'http://www.xuchang.gov.cn/openDetailDynamic.html?infoid={}'.format(rawid)
                if 'html' not in url:
                    continue
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99425'
                article_json["url"] = url
                article_json["title"] = a_info.xpath('./text()').extract_first().strip()
                pub_date = tr.xpath("./td[@class='ewb-rqnr']/span/text()")
                pub_date = clean_pubdate(pub_date.extract_first())
                if len(pub_date) != 8:
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_xuchangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: returns an empty DealModel (extraction happens in ETL)."""
    return DealModel()


def policy_xuchangarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: returns an empty DealModel (extraction happens in ETL)."""
    return DealModel()


def policy_xuchangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a downloaded Xuchang static-page policy article.

    Extracts title/organ/pub_no/index_no and the full text, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back onto the source row via ``other_dicts``.

    :raises Exception: when pub_date fails validation or no full-text node
        is found.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was populated by the list callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99425"
    product = "XUCHANG"
    zt_provider = "xuchanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading, falling back to the title captured on the list page.
    title = ''.join(sel.xpath("//h3[@class='ewb-info-tt']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    # Unlike some sibling callbacks, an invalid pub_date aborts the ETL here.
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    else:
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//label[contains(string(), '发布机构')]/following::b[1]/text()").extract()).strip()
    # Organs like "省..." are prefixed with the province name (Henan).
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(
        sel.xpath("//b[contains(string(), '文') and contains(string(), '号')]/following::td[1]/text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//label[contains(string(), '索')]/following::b[1]/text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)

    # Two known full-text containers; try the alternate before giving up.
    fulltext_xpath = "//div[@class='pstyle']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='ewb-article-content']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_xuchangarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Xuchang dynamic-API policy article (JSON payload).

    The downloaded body is JSON whose ``custom`` field is itself a JSON
    string; metadata and the HTML full text come from ``data[0]``. Emits
    rows for ``policy_latest`` and ``policy_fulltext_latest`` and merges
    in-page attachments with the API's ``attach`` list.

    Fixes: collapsed a duplicated nested ``if not checkExist(fulltext)``
    check; use ``isinstance`` instead of ``type(...) is list``.

    :raises Exception: when pub_date fails validation or the payload has
        no ``infocontent``.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was populated by the list callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    # Double-encoded payload: body JSON -> "custom" JSON string -> data[0].
    dinfo = json.loads(json.loads(src_data.html)["custom"])["data"][0]
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99425"
    product = "XUCHANG"
    zt_provider = "xuchanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: API field, falling back to the title captured on the list page.
    title = dinfo.get("title", "")
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    else:
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = dinfo.get("deptname", "")
    # Organs like "省..." are prefixed with the province name (Henan).
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = dinfo.get("documentnumber", "")
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = dinfo.get("identifier", "")
    data["index_no"] = cleanSemicolon(index_no)

    fulltext = dinfo.get("infocontent", "")
    if not checkExist(fulltext):
        raise Exception
    sel = Selector(text=fulltext)
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Merge attachments found in the HTML body with the API's attach list.
    file_info1 = get_file_info(data, sel, f'(//body)')
    file_info2 = list()
    file_infos = dinfo.get('attach', "")
    if isinstance(file_infos, list):
        for file_info in file_infos:
            purl = 'http://admin.xuchang.gov.cn:8080/EpointWebBuilder/frame/base/attach/attachdown.jspx?attachGuid=' + file_info["attachGuid"]
            dic = {'url': purl, 'name': file_info["name"], 'pub_year': pub_year, 'keyid': lngid}
            file_info2.append(dic)
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_luohelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Luohe gov (www.luohe.gov.cn) policy list page.

    On page 1 it fans out download tasks for pages 2..N (total page count
    comes from the companion "1_2" request); each article link becomes a
    next-stage task with url/title/pub_date in ``article_json``.

    :raises Exception: when a cleaned publish date is not 8 chars (YYYYMMDD).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        total_page = para_dicts["data"]["1_2"]["endPage"]
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule pages 2..total_page for download.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                page_info = "list{}.html".format(page)
                dic = {"page_info": page_info}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = res.xpath("//ul[@id='articleListTable']/li")

        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('.//a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'http://www.luohe.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                               list_json["page_info"])
            if href.startswith("/"):
                url = 'http://www.luohe.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)
            if 'html' not in url:
                continue
            # rawid = filename without extension; raw string so '\.' is a
            # literal dot (non-raw '\.' is an invalid escape on Python 3.12+).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99426'
            article_json["url"] = url
            article_json["title"] = a_info.xpath('./text()').extract_first().strip()
            pub_date = li.xpath("./b/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./span/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if len(pub_date) != 8:
                raise Exception
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_luohearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: returns an empty DealModel (extraction happens in ETL)."""
    return DealModel()


def policy_luohearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a downloaded Luohe policy article page.

    Extracts the title and full text (no organ/pub_no fields on this site's
    layout), emits rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back onto the source row via ``other_dicts``.

    :raises Exception: when no full-text node matches ``fulltext_xpath``.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    # article_json was populated by the list callback (url/title/pub_date).
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99426"
    product = "LUOHE"
    zt_provider = "luohegovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading, falling back to the title captured on the list page.
    title = ''.join(sel.xpath("//div[contains(@class,'lh-wzbt')]//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = article_json.get("pub_date", "")
    pub_year = ""
    # pub_date is expected to be YYYYMMDD when present.
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@id='content']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the originating task row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_smxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Sanmenxia (smx.gov.cn) policy list page.

    On page 1, reads the total page count from the pager text ("页次：1/N页")
    and enqueues the remaining list pages; on every page, extracts each
    article link and enqueues it for the article stage under the next task
    tag.

    :param callmodel: callback context carrying the downloaded HTML and the
        originating list-row model.
    :return: DealModel with page rows in ``befor_dicts`` and article rows in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Pager text looks like "页次：1/N页"; N is the total page count.
            max_count = re.findall(r"页次：1/(\d+)页", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Enqueue pages 2..total_page inclusive. The original upper bound
            # of `total_page` (exclusive) silently dropped the last page;
            # sibling list callbacks in this module use `total_page + 1`.
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="wzlb"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows are inserted under the next stage's task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.smx.gov.cn/pageView/zhengfuxinzhi.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links and in-page pagination links.
            if 'htm' not in url or 'pageNum=' in url:
                continue
            if 'wzid' in url:
                rawid = re.findall(r'wzid=(.*?)&', url.split('/')[-1])[0]
            else:
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99427'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[contains(@class,"qians")]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span[contains(@class,"hous")]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_smxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for smx.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_smxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Sanmenxia (smx.gov.cn) policy article page.

    Extracts title, dates, issuing organ, document/index numbers and the
    full-text HTML from the downloaded page, emits rows for the
    ``policy_latest`` / ``policy_fulltext_latest`` tables, and writes
    attachment info back onto the source row via an update model.

    Raises a bare ``Exception`` when no full-text container is found, so the
    task is marked failed upstream.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99427"
    product = "SMX"
    zt_provider = "smxgovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the title on the article page itself; fall back to the title
    # captured from the list page.
    title = ''.join(sel.xpath("//div[@class='titleBox']/p//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = clean_pubdate(article_json.get("pub_date", ""))
    pub_year = ""
    # clean_pubdate yields YYYYMMDD on success; only then is the year known.
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    # Metadata table: each label span ("发文机关" etc.) is followed by a
    # sibling value span inside the same parent div.
    organ = ''.join(sel.xpath(
        "//span[contains(string(), '发文机关')]/parent::div[1]/span[@class='item-value']//text()").extract()).strip()
    # Bare province prefix like "省..." is normalized to "河南省...".
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath(
        "//span[contains(string(), '发文字号')]/parent::div[1]/span[@class='item-value']//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(
        sel.xpath("//span[contains(string(), '索')]/parent::div[1]/span[@class='item-value']//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    written_date = ''.join(sel.xpath(
        "//span[contains(string(), '成文日期')]/parent::div[1]/span[@class='item-value']//text()").extract()).strip()
    data['written_date'] = clean_pubdate(written_date)
    invalid_date = ''.join(sel.xpath(
        "//span[contains(string(), '失效时间')]/parent::div[1]/span[@class='item-value']//text()").extract()).strip()
    data["invalid_date"] = clean_pubdate(invalid_date)

    # Two known page layouts for the body; try them in order.
    fulltext_xpath = "//div[@class='articleContent']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[@class='show-nr-b ft-zhengfu']"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments (if any) back on the source row as a JSON blob.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_zhoukoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhoukou (zhoukou.gov.cn) policy list page.

    On page 1, fetches the pagination JSON referenced by ``scriptJsonPath``
    (through one of several rotating SOCKS5 proxies) to learn the total page
    count, then enqueues the remaining list pages; on every page, extracts
    each article link and enqueues it for the article stage.

    :param callmodel: callback context with the downloaded HTML and source row.
    :return: DealModel with page rows in ``befor_dicts`` and article rows in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            page_url = "http://www.zhoukou.gov.cn" + re.findall(r'scriptJsonPath="(.*?)"', para_dicts["data"]["1_1"]['html'])[0]
            proxies_list = [
                {'http': 'socks5://192.168.31.171:10087', 'https': 'socks5://192.168.31.171:10087'},
                {'http': 'socks5://192.168.31.171:10088', 'https': 'socks5://192.168.31.171:10088'},
                {'http': 'socks5://192.168.31.171:10089', 'https': 'socks5://192.168.31.171:10089'},
                {'http': 'socks5://192.168.31.171:10090', 'https': 'socks5://192.168.31.171:10090'},
                {'http': 'socks5://192.168.31.171:10091', 'https': 'socks5://192.168.31.171:10091'},
            ]
            proxies = random.choice(proxies_list)
            headers = {
                "Accept": "*/*",
                "Accept-Encoding": "gzip, deflate, br",
                "Accept-Language": "zh-CN,zh;q=0.9",
                "Cache-Control": "no-cache",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
                "X-Requested-With": "XMLHttpRequest",
            }
            # Bound the request so a dead proxy cannot hang the worker forever
            # (the original call had no timeout).
            page_res = requests.get(page_url, headers=headers, verify=False, proxies=proxies, timeout=60)
            max_count = re.findall(r'"endPage":(\d+)', page_res.text)
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Enqueue pages 2..total_page inclusive. The original upper bound
            # of `total_page` (exclusive) dropped the last page; sibling list
            # callbacks in this module use `total_page + 1`.
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="picboxcon"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows go into the next stage's task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.zhoukou.gov.cn/template/viewCatalogList'
            url = parse.urljoin(base_url, href)
            # Only keep real article pages (*.htm/.html).
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99429'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/div[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/div[2]/font/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_zhoukouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zhoukou.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_zhoukouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Zhoukou (zhoukou.gov.cn) policy article page.

    Extracts title and publication date, grabs the full-text HTML container,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and stores
    attachment info back on the source row.

    Raises a bare ``Exception`` when the full-text container is missing, so
    the task is marked failed upstream.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99429"
    product = "ZHOUKOU"
    zt_provider = "zhoukougovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(sel.xpath("//div[@class='cms-article-tit']//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    pub_date = clean_pubdate(article_json.get("pub_date", ""))
    pub_year = ""
    # clean_pubdate yields YYYYMMDD on success; only then is the year known.
    if len(pub_date) == 8:
        pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    fulltext_xpath = "//div[@id='articleDetail']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments (if any) back on the source row as a JSON blob.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_zhumadianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhumadian (zhumadian.gov.cn) policy list page.

    Determines the total page count from either a ``PageListBar(...)`` script
    call or a ``total_in = N;`` variable (25 items per page), enqueues the
    remaining list pages on page 1, and extracts article links from either a
    plain HTML table or a JS-built ``strtext_in`` string variant.

    :param callmodel: callback context with the downloaded HTML and source row.
    :return: DealModel with page rows in ``befor_dicts`` and article rows in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Default to a single page. The original left total_page unbound
        # (NameError) when neither pagination pattern matched.
        total_page = 1
        page_info = res.xpath("//script[contains(text(),'PageListBar')]/text()")
        if checkExist(page_info):
            # PageListBar(current, total, ...) — second argument is the page count.
            tmps = re.findall(r'PageListBar\((.*?)\);', page_info.extract_first())
            if checkExist(tmps):
                tmps = tmps[0].split(",")
                total_page = int(tmps[1])
        else:
            # JS variant exposes the total item count; 25 items per page.
            page_info = re.findall(r"total_in = (\d+);", html)
            if checkExist(page_info):
                page_size = 25
                page_count = int(page_info[0])
                # Ceiling division: pages needed to hold page_count items.
                total_page = (page_count + page_size - 1) // page_size
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("currpage=1", "currpage={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Layout 1: static HTML table of links.
        tr_list = res.xpath("//div[@class='html_list_text']/table/tr")
        if checkExist(tr_list):
            for tr in tr_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                a_info = tr.xpath('./td[@height="25"]/a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'https://www.zhumadian.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                        list_json["page_info"])
                if href.startswith("/"):
                    url = 'https://www.zhumadian.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                # Filename stem (before the extension) is the stable raw id.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99430'
                article_json["url"] = url
                article_json["title"] = a_info.xpath("./text()").extract_first().strip()
                pub_date = tr.xpath("./td[3]/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                if len(pub_date) != 8:
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 2: rows are embedded in JS "strtext_in +=" string chunks.
            dinfo = re.findall(r"strtext_in \+=\s*(.*?);", html)
            if not checkExist(dinfo):
                raise Exception
            dinfo = "".join(dinfo)
            res = Selector(text=dinfo.replace("\\", ""))
            div_list = res.xpath("//div[@class='tcmstextlisttitle']")
            for div in div_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                article_json = dict()
                a_info = div.xpath('./a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'https://www.zhumadian.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                        list_json["page_info"])
                if href.startswith("/"):
                    url = 'https://www.zhumadian.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'html' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99430'
                article_json["url"] = url
                article_json["title"] = a_info.xpath("./text()").extract_first().strip()
                # JS variant carries no date; the article ETL recovers it.
                article_json["pub_date"] = ""
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_zhumadianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zhumadian.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_zhumadianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Zhumadian (zhumadian.gov.cn) policy article page.

    Extracts title, publication date (required — raises if invalid), issuing
    organ, document/index numbers and full text; emits rows for
    ``policy_latest`` / ``policy_fulltext_latest`` and writes attachment info
    back on the source row.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99430"
    product = "ZHUMADIAN"
    zt_provider = "zhumadiangovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(sel.xpath("//div[contains(@class,'zw-con')]/h1//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = article_json['title']
    data["title"] = cleanSemicolon(title)
    # Date is taken from the "时间：..." span on the page, not from the list row.
    pub_date = clean_pubdate(sel.xpath("//span[contains(@class,'ml-30') and contains(string(),'时间')]/text()").get("").replace("时间：",""))
    pub_year = ""
    if isVaildDate(pub_date):
        pub_year = pub_date[0:4]
    else:
        # A valid publication date is mandatory for this source.
        raise Exception
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//td[contains(string(), '信息来源')]/following::td[1]//text()").extract()).strip()
    # Bare province prefix like "省..." is normalized to "河南省...".
    if organ.startswith('省'):
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath(
        "//td[contains(string(), '文') and contains(string(), '号')]/following::td[1]//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//td[contains(string(), '索')]/following::td[1]//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    written_date = ''.join(sel.xpath("//td[contains(string(), '发文日期')]/following::td[1]//text()").extract()).strip()
    data['written_date'] = clean_pubdate(written_date)
    legal_status = ''.join(sel.xpath(
        "//td[contains(string(), '有') and contains(string(), '效')]/following::td[1]/text()").extract()).strip()
    # NOTE(review): clean_pubdate on a legal-status string looks like a
    # copy-paste slip (cleanSemicolon expected?) — confirm intended behavior.
    data["legal_status"] = clean_pubdate(legal_status)

    fulltext_xpath = "//div[contains(@class,'zw-con')]"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments (if any) back on the source row as a JSON blob.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_nanyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Nanyang (nanyang.gov.cn) policy list page.

    Reads the total page count from "共N页" pager text, enqueues the remaining
    list pages on page 1 (rewriting ``cur_page=`` in the page_info), and
    extracts article links from either a table layout or a list (ul/li)
    layout. Each article must carry a valid publication date or the task
    fails with a bare ``Exception``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        page_info = re.findall(r"共(\d+)页", html)
        if checkExist(page_info):
            total_page = int(page_info[0])
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        list_rawid = callmodel.sql_model.list_rawid
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": page_info.replace("cur_page=1", "cur_page={}".format(page))}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Layout 1: table-based listing with title and date columns.
        tr_list = res.xpath("//div[@class='info-list']/table/tbody/tr")
        if checkExist(tr_list):
            for tr in tr_list:
                temp = info_dicts.copy()
                # Article rows go into the next stage's task tag.
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = tr.xpath('./td[@class="tit"]/a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.nanyang.gov.cn/{}'.format(list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.nanyang.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                if "htm" in url:
                    # Filename stem (before the extension) is the stable raw id.
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99431'
                article_json["url"] = url
                article_json["title"] = a_info.xpath("./text()").extract_first().strip()
                pub_date = tr.xpath("./td[3]/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                if not isVaildDate(pub_date):
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)

                di_model_next.lists.append(temp)

        else:
            # Layout 2: ul/li listing, with a secondary fallback selector.
            li_list = res.xpath("//ul[@class='list-t']/li")
            if not checkExist(li_list):
                li_list = res.xpath("//div[@class='section']/ul/li")
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                a_info = li.xpath('.//a')
                if not checkExist(a_info):
                    continue
                href = a_info.xpath('./@href').get("")
                base_url = r'http://www.nanyang.gov.cn/{}'.format(list_json["page_info"])
                if href.startswith("/"):
                    url = 'http://www.nanyang.gov.cn{}'.format(href)
                else:
                    url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                if "htm" in url:
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if len(rawid) == 0:
                    raise Exception
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99431'
                article_json["url"] = url
                article_json["title"] = a_info.xpath("./text()").extract_first().strip()
                pub_date = li.xpath("./span/text()")
                if checkExist(pub_date):
                    pub_date = clean_pubdate(pub_date.extract_first())
                if not isVaildDate(pub_date):
                    raise Exception
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nanyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nanyang.gov.cn; all parsing happens in the ETL step."""
    return DealModel()


def policy_nanyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Nanyang (nanyang.gov.cn) policy article page.

    Extracts the title (two known layouts, fails if neither matches), takes
    the publication date from the list row, grabs the full text (two known
    containers), emits ``policy_latest`` / ``policy_fulltext_latest`` rows,
    and writes attachment info back on the source row.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99431"
    product = "NANYANG"
    zt_provider = "nanyanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Two page layouts for the title; a title is mandatory for this source.
    title = ''.join(sel.xpath("//div[contains(@class,'cont')]/h3//text()").extract()).strip()
    if checkExist(title):
        title = cleanSemicolon(title)
    else:
        title = ''.join(sel.xpath("//div[contains(@class,'article')]/h2//text()").extract()).strip()
        if checkExist(title):
            title = cleanSemicolon(title)
        else:
            raise Exception
    data["title"] = cleanSemicolon(title)
    # The list callback already validated this date, so it is used directly.
    pub_date = article_json["pub_date"]
    pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year


    # Two known full-text containers; try them in order.
    fulltext_xpath = "//div[contains(@class,'article-box')]"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        fulltext_xpath = "//div[contains(@class,'files')]"
        fulltext = sel.xpath(fulltext_xpath).get("")
        if not checkExist(fulltext):
            raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments (if any) back on the source row as a JSON blob.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_xinyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.xinyang.gov.cn (Xinyang City) policy lists.

    Parses the first list page for the total page count, schedules the
    remaining list pages (only when page_index == 0), and emits one
    next-stage task per article link on the current page.

    Raises:
        Exception: when a rawid cannot be derived from an article URL, or a
            publication date fails validation.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # The page count is exposed as attributes on <div id="pageDec">.
        div_page = res.xpath("//div[@id='pageDec']")
        page_size = div_page.xpath("./@pagesize").get("")
        page_count = div_page.xpath("./@pagecount").get("")
        if len(page_size) > 0 and len(page_count) > 0:
            # Ceiling division: total item count / items per page.
            total_page = int((int(page_count) + int(page_size) - 1) / int(page_size))
        else:
            total_page = 1
        list_json = json.loads(callmodel.sql_model.list_json)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page (page 0 is index.html): enqueue pages 1..total_page-1
            # as index_<n>.html.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": "index_{}.html".format(page)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # The list markup varies by section; try both known layouts.
        li_list = res.xpath("//ul[contains(@class,'new-special-list')]/li")
        if not checkExist(li_list):
            li_list = res.xpath("//div[contains(@class,'zfxxgk_zdgkc')]/ul/li")
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            a_info = li.xpath('./a')
            if not checkExist(a_info):
                continue
            href = a_info.xpath('./@href').get("")
            base_url = r'https://www.xinyang.gov.cn/{}/{}'.format(callmodel.sql_model.list_rawid,
                                                                    list_json["page_info"])
            if href.startswith("/"):
                url = 'https://www.xinyang.gov.cn{}'.format(href)
            else:
                url = parse.urljoin(base_url, href)

            if 'html' not in url:
                continue
            # 'html' is guaranteed in the URL here, so the rawid lookup
            # is unconditional (the old second check was redundant).
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            if len(rawid) == 0:
                raise Exception
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99432'
            article_json["url"] = url
            article_json["title"] = a_info.xpath("./text()").extract_first().strip()
            pub_date = a_info.xpath("./span/text()")
            if not checkExist(pub_date):
                pub_date = li.xpath("./b/text()")
            pub_date = clean_pubdate(pub_date.extract_first())
            if not isVaildDate(pub_date):
                raise Exception
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_xinyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for xinyang.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_xinyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.xinyang.gov.cn article pages.

    Extracts policy metadata and full text from the downloaded HTML and
    queues rows for policy_latest / policy_fulltext_latest, plus an
    attachment-info ("other_dicts") update on the source row.

    Raises:
        Exception: when neither title layout matches, or the full-text
            container is missing.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])
    src_data = down_model["1_1"]
    sel = Selector(text=src_data.html)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = "99432"
    product = "XINYANG"
    zt_provider = "xinyanggovpolicy"
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    data["provider_url"] = article_json['url']
    # Title: page heading first, then the metadata table layout.
    title = ''.join(sel.xpath("//div[contains(@class,'details-main')]/h1//text()").extract()).strip()
    if not checkExist(title):
        title = ''.join(sel.xpath("//b[contains(string(), '标')]/following::td[1]//text()").extract()).strip()
        if not checkExist(title):
            raise Exception
    # Single cleaning pass (the old code applied cleanSemicolon twice).
    data["title"] = cleanSemicolon(title)
    pub_date = article_json["pub_date"]
    pub_year = pub_date[0:4]
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year

    organ = ''.join(sel.xpath("//b[contains(string(), '发文机关')]/following::td[1]//text()").extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial-level organ names with the province.
        organ = '河南' + organ
    data["organ"] = cleanSemicolon(organ)
    pub_no = ''.join(sel.xpath("//b[contains(string(), '发文字号')]/following::td[1]//text()").extract()).strip()
    data["pub_no"] = cleanSemicolon(pub_no)
    index_no = ''.join(sel.xpath("//b[contains(string(), '索')]/following::td[1]//text()").extract()).strip()
    data["index_no"] = cleanSemicolon(index_no)
    written_date = ''.join(sel.xpath("//td[contains(string(), '成文日期')]/following::td[1]//text()").extract()).strip()
    data['written_date'] = clean_pubdate(written_date)
    legal_status = ''.join(sel.xpath("//b[contains(string(), '有') and contains(string(), '效')]/following::td[1]/text()").extract()).strip()
    # NOTE(review): legal_status is run through clean_pubdate (a date cleaner),
    # not a text cleaner — looks like a copy-paste slip; confirm intent before changing.
    data["legal_status"] = clean_pubdate(legal_status)

    fulltext_xpath = "//div[@id='content']"
    fulltext = sel.xpath(fulltext_xpath).get("")
    if not checkExist(fulltext):
        raise Exception
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the source row.
    file_info = get_file_info(data, sel, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Tianjin Binhai New Area (天津市滨海新区)
def policy_tjbhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.tjbh.gov.cn (Tianjin Binhai) policy lists.

    Reads the total page count from the "末页" (last page) link, schedules
    the remaining list pages (first page only), and emits one next-stage
    task per article row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The last-page link encodes the total page count in one of two forms.
        page_info = res.xpath('//a[contains(text(),"末页")]/@href').extract_first()
        if page_info:
            max_count = re.findall(r'_(\d+)\.', page_info)
            if not max_count:
                max_count = re.findall(r'\((\d+)\)', page_info)
            total_page = int(max_count[0]) if max_count else 1
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages as <page_info>_<n>.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//li[contains(@class,"xxgk-list")]|//div[@class="sec-list"]/ul//li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/div[1]/a/@href').extract_first()
            # Rows without an href used to crash on None.startswith; skip them.
            if not href:
                continue
            if href.startswith('/'):
                url = 'http://www.tjbh.gov.cn' + href
            else:
                base_url = f'http://www.tjbh.gov.cn/{callmodel.sql_model.list_rawid}.html'
                url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99181'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/div[1]/a/@title|div/div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/div[2]/span/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjbharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for tjbh.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_tjbharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.tjbh.gov.cn article pages.

    Extracts policy metadata and full text from the downloaded HTML and
    queues rows for policy_latest / policy_fulltext_latest, plus an
    attachment-info ("other_dicts") update on the source row.

    Raises:
        Exception: when the full-text container is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: metadata table cell, then #MainTitle, then the list-page title.
    title = ''.join(
        res.xpath('//span[contains(text(),"名") and contains(text(),"称")]/parent::div[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//span[@id="MainTitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(
        res.xpath('//span[contains(text(),"文") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//span[contains(text(),"索") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    subject = ''.join(
        res.xpath('//span[contains(text(),"主") and contains(text(),"类")]/parent::div[1]/font/text()').extract()).strip()
    written_date = ''.join(
        res.xpath('//span[contains(text(),"成") and contains(text(),"期")]/parent::div[1]/text()').extract()).strip()
    legal_status = ''.join(
        res.xpath('//span[contains(text(),"有") and contains(text(),"性")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(
        res.xpath('//span[contains(text(),"发") and contains(text(),"构")]/parent::div[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare municipal-level organ names with the city.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99181'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJBH"
    zt_provider = "tjbhcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Hunan Provincial Development and Reform Commission (湖南省发展和改革委员会)
def policy_fgwhunanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the fgw.hunan.gov.cn JSON search API.

    Computes the total page count from the API's `total` field (20 results
    per page), schedules the remaining pages (first page only), and emits
    one next-stage task per result entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["data"]["total"]
        # The API serves 20 results per page.
        total_page = math.ceil(int(total) / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page_info is identical for every page; paging is driven
                # by page_index alone.
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]["results"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99456'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["publishedTimeStr"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwhunanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for fgw.hunan.gov.cn HTML list pages.

    Reads the total page count from one of two JS pager invocations,
    schedules the remaining list pages (first page only), then emits one
    next-stage task per article row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML(...) or Pager({...}) call.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', para_dicts["data"]["1_1"]['html'])
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', para_dicts["data"]["1_1"]['html'])
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages as <page_info>_<n>.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The list markup varies by section; try the known layouts in order.
        li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        url_path = './td/a/@href'
        title_path = './td/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99456'
            # Resolve relative links against the site root or the list directory.
            if url.startswith("/"):
                url_before = "http://fgw.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://fgw.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://fgw.hunan.gov.cn/fgw/xxgk_70899/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]//text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwhunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for fgw.hunan.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_fgwhunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for fgw.hunan.gov.cn article pages.

    Extracts policy metadata and full text from the downloaded HTML and
    queues rows for policy_latest / policy_fulltext_latest, plus an
    attachment-info ("other_dicts") update on the source row.

    Raises:
        Exception: when none of the known full-text containers match.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Title: detail header, then metadata table, then <h3>, then the list title.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Prefer the publication date printed on the page; fall back to the list date.
    pub_date_info = ''.join(res.xpath('//li[contains(text(),"发文日期")]//text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]

    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//li[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]//text()').extract()).strip()

    legal_status = ''.join(
        res.xpath('//li[contains(string(),"时") and contains(string(),"效")]//text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//li[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify bare provincial-level organ names with the province.
        organ = '湖南' + organ

    # Full text: try the known containers in order (the old code left the
    # third extraction outside its `if`, re-extracting unconditionally).
    fulltext_xpath = '//div[@id="j-show-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="tys-main-zt-show"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99456'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWHUNAN"
    zt_provider = "fgwhunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment info (or an empty dict) back on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Hunan Provincial Department of Industry and Information Technology (gxt.hunan.gov.cn) — comment previously mislabeled this section as the Development and Reform Commission
def policy_gxthunanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the gxt.hunan.gov.cn JSON search API.

    Computes the total page count from the API's `total` field (20 results
    per page), schedules the remaining pages (first page only), and emits
    one next-stage task per result entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["data"]["total"]
        # The API serves 20 results per page.
        total_page = math.ceil(int(total) / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page_info is identical for every page; paging is driven
                # by page_index alone.
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]["results"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99457'
            # Root-relative links are resolved against the site root.
            if url.startswith("/"):
                url_before = "http://gxt.hunan.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["publishedTimeStr"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxthunanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 湖南省工业和信息化厅 (gxt.hunan.gov.cn) policy lists.

    Parses the fetched list HTML, derives the total page count from the
    pagination script, fans out the remaining list-page tasks (only when
    handling page 1), and emits one article task per list entry.

    :param callmodel: wraps the fetched page (``para_dicts``) and the source
        row (``sql_model``) for this list task.
    :return: ``DealModel`` with before-tasks (extra list pages) and
        next-tasks (article pages) to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts.
        # Raw strings avoid invalid-escape warnings in the regex patterns.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page enqueues the remaining list pages, so the
            # fan-out happens exactly once per list.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_name = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_name}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try them in order.
        li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="zlyjq"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        url_path = './td/a/@href'
        title_path = './td/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                continue
            elif 'htm' not in url:
                # Skip non-article links (attachments, javascript anchors, ...).
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99457'
            # Resolve relative links: site-absolute, parent-relative, or
            # relative to the list page's own directory ("../" is checked
            # first because it also contains "./").
            if url.startswith("/"):
                url_before = "http://gxt.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://gxt.hunan.gov.cn/"
            elif "./" in url:
                url_before = "http://gxt.hunan.gov.cn/gxt/xxgk_71033/zcfg/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]//text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for gxt.hunan.gov.cn: schedules nothing further."""
    return DealModel()


def policy_gxthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 湖南省工业和信息化厅 article pages.

    Extracts metadata (title, document number, index number, ...) and the
    full-text HTML from the downloaded article page, builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows, and writes the
    attachment info back onto the source row.

    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: detail header -> info-table row -> <h3> -> list-page title.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata is rendered as <li> items matched by their label characters
    # (characters are matched separately to survive interleaved markup).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province.
        organ = '湖南' + organ

    # The full text lives in one of several containers depending on template.
    fulltext_xpath = '//div[@id="j-show-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="tys-main-zt-show"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99457'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTHUNAN"
    zt_provider = "gxthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the matched full-text container are written
    # back to the source row so the file downloader can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省科学技术厅

def policy_kjthunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 湖南省科学技术厅 (kjt.hunan.gov.cn) policy lists.

    Parses the fetched list HTML, derives the total page count from the
    pagination script, fans out the remaining list-page tasks (only when
    handling page 1), and emits one article task per list entry.

    :param callmodel: wraps the fetched page (``para_dicts``) and the source
        row (``sql_model``) for this list task.
    :return: ``DealModel`` with before-tasks (extra list pages) and
        next-tasks (article pages) to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts.
        # Raw strings avoid invalid-escape warnings in the regex patterns.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page enqueues the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_name = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_name}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try them in order.
        li_list = res.xpath('//ul[@class="list_gkzd_content"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="xzadd-mu-1"]/table/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                # Table layout nests the anchor under <td>.
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip non-article links (attachments, javascript anchors, ...).
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99458'
            # Resolve relative links: site-absolute, parent-relative, or
            # relative to the list page's own directory ("../" is checked
            # first because it also contains "./").
            if url.startswith("/"):
                url_before = "http://kjt.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://kjt.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://kjt.hunan.gov.cn/kjt/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[4]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for kjt.hunan.gov.cn: schedules nothing further."""
    return DealModel()


def policy_kjthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 湖南省科学技术厅 article pages.

    Extracts metadata (title, document number, index number, ...) and the
    full-text HTML from the downloaded article page, builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows, and writes the
    attachment info back onto the source row.

    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: detail header -> info-table row -> <h3> -> list-page title.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata is rendered as <li> items matched by their label characters
    # (characters are matched separately to survive interleaved markup).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province.
        organ = '湖南' + organ

    # The full text lives in one of several containers depending on template.
    fulltext_xpath = '//div[@id="j-show-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="tys-main-zt-show"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoomcon"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99458'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTHUNAN"
    zt_provider = "kjthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the matched full-text container are written
    # back to the source row so the file downloader can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省教育厅

def policy_jythunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 湖南省教育厅 (jyt.hunan.gov.cn) policy lists.

    Parses the fetched list HTML, derives the total page count from the
    pagination script, fans out the remaining list-page tasks (only when
    handling page 1), and emits one article task per list entry.

    :param callmodel: wraps the fetched page (``para_dicts``) and the source
        row (``sql_model``) for this list task.
    :return: ``DealModel`` with before-tasks (extra list pages) and
        next-tasks (article pages) to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts.
        # Raw strings avoid invalid-escape warnings in the regex patterns.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page enqueues the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_name = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_name}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try them in order.
        li_list = res.xpath('//ul[@class="list_gkzd_content"]/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                # Table layout nests the anchor under <td>.
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip non-article links (attachments, javascript anchors, ...).
                continue
            # rawid = "<parent dir>_<file name without extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99459'
            # Resolve relative links: site-absolute, parent-relative, or
            # relative to the list page's own directory ("../" is checked
            # first because it also contains "./").
            if url.startswith("/"):
                url_before = "http://jyt.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://jyt.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://jyt.hunan.gov.cn/jyt/sjyt/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[4]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jythunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for jyt.hunan.gov.cn: schedules nothing further."""
    return DealModel()


def policy_jythunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 湖南省教育厅 article pages.

    Extracts metadata (title, document number, index number, validity
    date, ...) and the full-text HTML from the downloaded article page,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows, and
    writes the attachment info back onto the source row.

    :raises Exception: when no known full-text container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: detail header -> info-table row -> <h2> -> list-page title.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata is rendered as <li> items matched by their label characters
    # (characters are matched separately to survive interleaved markup).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"属") and contains(string(),"主") and contains(string(),"题")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    # On this site the "时效" item feeds invalid_date (not legal_status).
    invalid_date = ''.join(
        res.xpath('//li[contains(string(),"时") and contains(string(),"效")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province.
        organ = '湖南' + organ

    # The full text lives in one of several containers depending on template.
    fulltext_xpath = '//div[@id="j-show-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="tys-main-zt-show"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99459'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTHUNAN"
    zt_provider = "jythunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the matched full-text container are written
    # back to the source row so the file downloader can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省民政厅

def policy_mzthunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hunan Civil Affairs Dept (mzt.hunan.gov.cn).

    On the first list page, schedules crawl tasks for the remaining pages
    (``befor_dicts``); for every article link found on the current page,
    queues one article task with its url/title/pub_date (``next_dicts``).

    Args:
        callmodel: platform callback context carrying the downloaded HTML
            (``para_dicts["data"]["1_1"]["html"]``) and the task row.

    Returns:
        DealModel with the insert lists described above.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts;
        # raw strings avoid invalid-escape warnings on Python 3.12+.
        page_info = re.findall(r"createPageHTML.*?\('paging', *(\d+),", html)
        if not page_info:
            page_info = re.findall(r"Pager\(\{size:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info_tag = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_tag}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try the known ones in order.
        li_list = res.xpath('//ul[@class="list_gkzd_content"]/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table_list"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="table"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                # Table layout keeps the link inside a <td>; note the
                # 'htm' filter below is deliberately skipped for this path.
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            # rawid = "<last dir segment>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99460'
            # Resolve relative links against the site (or section) root.
            if url.startswith("/") or "../" in url:
                url_before = "http://mzt.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://mzt.hunan.gov.cn/mzt/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[2]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.hunan.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_mzthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for articles of the Hunan Civil Affairs Dept (mzt.hunan.gov.cn).

    Extracts metadata (title, document number, issuing organ, dates, ...)
    and the full text from the downloaded article page, builds one row for
    ``policy_latest`` and one for ``policy_fulltext_latest``, and records
    attachment info (``other_dicts``) back onto the task row.

    Raises:
        Exception: when no known full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page header, then the metadata table, then any <h2>, and
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata rows are <li> items; match them by the CJK characters of
    # their labels (文号, 索引号, 主题词, ...).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    legal_status = ''.join(
        res.xpath('//li[contains(string(),"时") and contains(string(),"效")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify a bare "省..." organ name with the province.
        organ = '湖南' + organ

    # Full text: try the known content containers in order; keep the
    # matching xpath for the attachment scan below.
    fulltext = None
    fulltext_xpath = '//div[@id="j-show-body"]'
    for fulltext_xpath in ('//div[@id="j-show-body"]',
                           '//div[@class="tys-main-zt-show"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99460'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTHUNAN"
    zt_provider = "mzthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the full-text container go back onto the
    # task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省财政厅

def policy_czthunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hunan Finance Dept (czt.hunan.gov.cn).

    On the first list page, schedules crawl tasks for the remaining pages
    (``befor_dicts``); for every article link found on the current page,
    queues one article task with its url/title/pub_date (``next_dicts``).

    Args:
        callmodel: platform callback context carrying the downloaded HTML
            (``para_dicts["data"]["1_1"]["html"]``) and the task row.

    Returns:
        DealModel with the insert lists described above.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts;
        # raw strings avoid invalid-escape warnings on Python 3.12+.
        page_info = re.findall(r"createPageHTML.*?\('paging', *(\d+),", html)
        if not page_info:
            page_info = re.findall(r"Pager\(\{size:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info_tag = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_tag}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try the known ones in order.
        li_list = res.xpath('//ul[@class="list_gkzd_content"]/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="tyl-main-right-list-a"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                # Table layout keeps the link inside a <td>; note the
                # 'htm' filter below is deliberately skipped for this path.
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            # rawid = "<last dir segment>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99461'
            # Resolve relative links against the site (or section) root.
            if url.startswith("/") or "../" in url:
                url_before = "http://czt.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://czt.hunan.gov.cn/czt/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath('./td[4]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[3]/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.hunan.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_czthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for articles of the Hunan Finance Dept (czt.hunan.gov.cn).

    Extracts metadata (title, document number, issuing organ, dates, ...)
    and the full text from the downloaded article page, builds one row for
    ``policy_latest`` and one for ``policy_fulltext_latest``, and records
    attachment info (``other_dicts``) back onto the task row.

    Raises:
        Exception: when no known full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page header, then the metadata table, then any <h3>, and
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="detail_tittle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata rows are <li> items; match them by the CJK characters of
    # their labels (文号, 索引号, 主题词, ...).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"有") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify a bare "省..." organ name with the province.
        organ = '湖南' + organ

    # Full text: try the known content containers in order; keep the
    # matching xpath for the attachment scan below.
    fulltext = None
    fulltext_xpath = '//div[@id="j-show-body"]'
    for fulltext_xpath in ('//div[@id="j-show-body"]',
                           '//div[@class="tys-main-zt-show"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99461'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTHUNAN"
    zt_provider = "czthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the full-text container go back onto the
    # task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省人力资源和社会保障厅
def policy_rsthunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hunan HR & Social Security Dept (rst.hunan.gov.cn).

    On the first list page, schedules crawl tasks for the remaining pages
    (``befor_dicts``); for every article link found on the current page,
    queues one article task with its url/title/pub_date (``next_dicts``).

    Args:
        callmodel: platform callback context carrying the downloaded HTML
            (``para_dicts["data"]["1_1"]["html"]``) and the task row.

    Returns:
        DealModel with the insert lists described above.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in one of two pagination scripts;
        # raw strings avoid invalid-escape warnings on Python 3.12+.
        page_info = re.findall(r"createPageHTML.*?\('paging', *(\d+),", html)
        if not page_info:
            page_info = re.findall(r"Pager\(\{size:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info_tag = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info_tag}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try the known ones in order.
        li_list = res.xpath('//ul[@class="list_gkzd_content"]/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table_list"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="tyl-main-right-list-a"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                # Table layout keeps the link inside a <td>; note the
                # 'htm' filter below is deliberately skipped for this path.
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            # rawid = "<last dir segment>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99462'
            # Resolve relative links against the site (or section) root.
            if url.startswith("/") or "../" in url:
                url_before = "http://rst.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://rst.hunan.gov.cn/rst/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath('./td[3]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rsthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.hunan.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_rsthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for articles of the Hunan HR & Social Security Dept (rst.hunan.gov.cn).

    Extracts metadata (title, document number, issuing organ, dates, ...)
    and the full text from the downloaded article page, builds one row for
    ``policy_latest`` and one for ``policy_fulltext_latest``, and records
    attachment info (``other_dicts``) back onto the task row.

    Raises:
        Exception: when no known full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page header, then the metadata table, then any <h1>, and
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="articletitle"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata rows are <li> items; match them by the CJK characters of
    # their labels (文号, 索引号, 主题词, ...).
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"有") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify a bare "省..." organ name with the province.
        organ = '湖南' + organ

    # Full text: try the known content containers in order; keep the
    # matching xpath for the attachment scan below.
    fulltext = None
    fulltext_xpath = '//div[@id="j-show-body"]'
    for fulltext_xpath in ('//div[@id="j-show-body"]',
                           '//div[@class="tys-main-zt-show"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99462'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTHUNAN"
    zt_provider = "rsthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the full-text container go back onto the
    # task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省农业农村厅 (Hunan Provincial Department of Agriculture and Rural Affairs)
def policy_agrihunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for agri.hunan.gov.cn.

    Parses the downloaded list HTML, discovers the total page count from the
    embedded pager JavaScript, schedules the remaining list pages (only while
    handling page 1 so they are enqueued exactly once), and emits one
    article-level task per list entry.

    :param callmodel: platform callback model; ``para_dicts["data"]["1_1"]``
        holds the fetched list-page HTML.
    :return: a ``DealModel`` whose ``befor_dicts`` carries the follow-up list
        pages and whose ``next_dicts`` carries the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in JS: try createPageHTML('paging', N, ...)
        # first, then the Pager({size:N,...}) variant; default to one page.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_prefix = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_prefix}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Several list templates are in use on the site; try them in order.
        li_list = res.xpath('//div[@class="hy-list-text"]/ul/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table_list"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="tyl-main-right-list-a"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a/text()').get()
            if url is None:
                url = li.xpath('./td/a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip links that are not article pages (attachments, etc.).
                continue
            # rawid = "<parent dir>_<file name without .htm/.html/.shtml>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99463'
            # Resolve relative URLs against the site root or current column.
            if url.startswith("/"):
                url_before = "http://agri.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://agri.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://agri.hunan.gov.cn/agri/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/small/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_agrihunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for agri.hunan.gov.cn: nothing extra to
    schedule, so an empty ``DealModel`` is returned."""
    return DealModel()


def policy_agrihunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for agri.hunan.gov.cn article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, keywords, issuing organ, dates) and the full text from the
    downloaded article HTML, queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info on the task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the fields
        captured on the list page.
    :return: an ``EtlDealModel`` carrying ``save_data`` and a task-row update.
    :raises Exception: when no full-text container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page heading first, then the metadata table, then any <h2>,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="articletitle"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata <li> entries are located by their Chinese label characters.
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"有") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('省'):
        # A leading "省" denotes a provincial organ: qualify with the province.
        organ = '湖南' + organ

    # The full text lives in one of several containers depending on template.
    fulltext = None
    for fulltext_xpath in ('//div[@id="zoom"]',
                           '//div[@class="tys-main-zt-show"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99462'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AGRIHUNAN"
    zt_provider = "agrihunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省卫生健康委员会 (Hunan Provincial Health Commission)
def policy_wjwhunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wjw.hunan.gov.cn.

    Parses the downloaded list HTML, discovers the total page count from the
    embedded pager JavaScript, schedules the remaining list pages (only while
    handling page 1 so they are enqueued exactly once), and emits one
    article-level task per list entry.

    :param callmodel: platform callback model; ``para_dicts["data"]["1_1"]``
        holds the fetched list-page HTML.
    :return: a ``DealModel`` whose ``befor_dicts`` carries the follow-up list
        pages and whose ``next_dicts`` carries the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in JS: try createPageHTML('paging', N, ...)
        # first, then the Pager({size:N,...}) variant; default to one page.
        page_info = re.findall(r'createPageHTML.*?\(\'paging\', *(\d+),', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'Pager\(\{size:(\d+),', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_prefix = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_prefix}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Several list templates are in use on the site; try them in order.
        li_list = res.xpath('//div[@class="hy-list-text"]/ul/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table_list"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="tyl-main-right-list-a"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td/a[1]/@title').get()
            if url is None:
                url = li.xpath('./td/a[1]/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip links that are not article pages (attachments, etc.).
                continue
            # rawid = "<parent dir>_<file name without .htm/.html/.shtml>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99465'
            # Resolve relative URLs against the site root or current column.
            if url.startswith("/"):
                url_before = "http://wjw.hunan.gov.cn"
            elif "../" in url:
                url_before = "http://wjw.hunan.gov.cn"
            elif "./" in url:
                url_before = "http://wjw.hunan.gov.cn/wjw/xxgk/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/small/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwhunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wjw.hunan.gov.cn: nothing extra to
    schedule, so an empty ``DealModel`` is returned."""
    return DealModel()


def policy_wjwhunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wjw.hunan.gov.cn article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, keywords, issuing organ, dates) and the full text from the
    downloaded article HTML, queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info on the task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the fields
        captured on the list page.
    :return: an ``EtlDealModel`` carrying ``save_data`` and a task-row update.
    :raises Exception: when no full-text container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page heading first, then the metadata table, then any <h2>,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//h1[@class="articletitle"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata <li> entries are located by their Chinese label characters.
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"有") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('省'):
        # A leading "省" denotes a provincial organ: qualify with the province.
        organ = '湖南' + organ

    # The full text lives in one of several containers depending on template.
    fulltext = None
    for fulltext_xpath in ('//div[@id="j-show-body"]',
                           '//div[@class="tys-main-zt-show"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99465'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWHUNAN"
    zt_provider = "wjwhunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省长沙市 (Changsha Municipality, Hunan Province)
def policy_changshalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.changsha.gov.cn.

    Parses the downloaded list HTML, discovers the total page count from the
    embedded createPageHTML5/createPageHTML JavaScript, schedules the
    remaining list pages (only while handling page 0 so they are enqueued
    exactly once), and emits one article-level task per list entry.

    NOTE(review): this site paginates from index 0 and the fan-out loop stops
    at ``total_page`` exclusive, unlike the sibling callbacks — presumably
    because pages are 0-based here; confirmed behavior is preserved as-is.

    :param callmodel: platform callback model; ``para_dicts["data"]["1_1"]``
        holds the fetched list-page HTML.
    :return: a ``DealModel`` whose ``befor_dicts`` carries the follow-up list
        pages and whose ``next_dicts`` carries the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in JS: createPageHTML5("N", ...) first,
        # then the older createPageHTML("N", ...); default to one page.
        page_info = re.findall(r'createPageHTML5.*?\(\"(\d+)\",.*', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first (0-indexed) page fans out the remaining pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info_prefix = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_prefix}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # Several list templates are in use on the site; try them in order.
        li_list = res.xpath('//div[@class="bd_new bd_a80 right_list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="xxgk-item"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="wjk-list"]/li')
        url_path = './p[@class="row2"]/a/@href'
        title_path = './p[@class="row2"]/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip links that are not article pages (attachments, etc.).
                continue
            # rawid = "<parent dir>_<file name without .htm/.html/.shtml>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99466'
            # Resolve relative URLs against the site root or current column.
            if url.startswith("/"):
                url_before = "http://www.changsha.gov.cn"
            elif "../" in url:
                url_before = "http://www.changsha.gov.cn"
            elif "./" in url:
                url_before = "http://www.changsha.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./p[@class="row3"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changshaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.changsha.gov.cn: nothing extra to
    schedule, so an empty ``DealModel`` is returned."""
    return DealModel()


def policy_changshaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.changsha.gov.cn article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, keywords, issuing organ, legal status and dates) and the full
    text from the downloaded article HTML, queues rows for ``policy_latest``
    and ``policy_fulltext_latest``, and records attachment info (from both
    the full-text container and the attachment list) on the task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the fields
        captured on the list page.
    :return: an ``EtlDealModel`` carrying ``save_data`` and a task-row update.
    :raises Exception: when no full-text container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page heading first, then the metadata table, any <h2>, the
    # ArticleTitle meta tag, and finally the list-page title.
    title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata <li> entries are located by their Chinese label characters.
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    legal_status = ''.join(
        res.xpath(
            '//li[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    impl_date = ''.join(
        res.xpath(
            '//li[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('市'):
        # A leading "市" denotes a municipal organ: qualify with the city.
        organ = '长沙' + organ

    # The full text lives in one of several containers depending on template.
    fulltext = None
    for fulltext_xpath in ('//div[@id="zoom"]',
                           '//div[@class="content-main"]',
                           '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    # Some pages on this site carry only a download link: the container node
    # exists but is empty, in which case we still bail out here.
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99466'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHANGSHA"
    zt_provider = "changshagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear inside the full text or in the "fjdown" list;
    # merge both and record them (or an empty dict) back on the task row.
    fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省株洲市
def policy_zhuzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhuzhou (湖南省株洲市) gov policy site.

    Reads the fetched list page, determines the total page count, schedules
    the remaining list pages for this column, and emits one next-stage row
    per article link (url / title / pub_date).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total pages: prefer the ".../pages/N.htm" href of the last-but-one
        # pagination link; fall back to the createPageHTML("N", ...) script.
        page_info_before = res.xpath("//div[@id='pages']/a[last()-1]/@href").extract_first()
        # BUGFIX: page_info_before may be None (no pagination node), which made
        # re.sub raise TypeError; and re.sub returns its input unchanged on a
        # no-match, which made int() crash.  re.search handles both cases.
        match = re.search(r"pages/(\d+)\.htm", page_info_before or "")
        if match:
            total_page = int(match.group(1))
        else:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(sql_dict)
        # Schedule every not-yet-visited list page of this column.
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            dic = {"page_info": f"pages/{page}"}
            sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # The list markup varies by column; try the known layouts in order.
        li_list = res.xpath('//div[@class="xxgk-list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="wjk-list"]/li')
        url_path = './td[@class="txt-left"]/a/@href'
        title_path = './td[@class="txt-left"]/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99467'
            # Resolve relative links against the site root / column path.
            if url.startswith("/"):
                url_before = "http://www.zhuzhou.gov.cn"
            elif "../" in url:
                url_before = "http://www.zhuzhou.gov.cn"
            elif "./" in url:
                url_before = "http://www.zhuzhou.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[4]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhuzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhuzhou: no extra scheduling is needed, so
    an empty DealModel is returned."""
    return DealModel()


def policy_zhuzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhuzhou (湖南省株洲市) policy article pages.

    Parses title, metadata fields and the full text out of the fetched html,
    builds rows for the policy_latest / policy_fulltext_latest tables, and
    writes attachment info back onto the crawl record.

    Raises:
        Exception: when no full-text container can be found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page heading, then the metadata table, then the list-page title.
    title = ''.join(res.xpath('//div[@class="article"]/h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata labels are matched character-by-character because the site pads
    # them with full-width spaces (e.g. "文  号").
    pub_no = ''.join(
        res.xpath('//p[@class="a1"]/font[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//p[@class="a1"]/font[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(
        res.xpath('//p[@class="a1"]/font[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//p[@class="a1"]/font[contains(string(),"属") and contains(string(),"主") and contains(string(),"题")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//p[@class="a1"]/font[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    legal_status = ''.join(
        res.xpath(
            '//p[@class="a1"]/font[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//p[@class="a1"]/font[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    impl_date = ''.join(
        res.xpath(
            '//p[@class="a1"]/font[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//p[@class="a1"]/font[contains(string(),"所") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()
    if not organ:
        # Fall back to the "来源：" line, but accept it only when it looks
        # like an actual government-body name.
        organ = ''.join(res.xpath('//p[@class="info"]/span[contains(text(),"来源：")]/text()').extract()).strip()
        if not (organ.endswith('局') or organ.endswith('会') or organ.endswith('办公室')):
            organ = ''
    if not organ:
        # Last resort: pull an organ-looking fragment out of the title.
        organ_info = re.findall('市.{1,10}?(局|委员会|办公室)', title)
        organ = organ_info[0] if organ_info else ''
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '株洲' + organ

    # Full text: try the known containers in order.  BUGFIX: the original
    # re-extracted with the matched xpath a second time after the fallback
    # chain; this loop performs each extraction exactly once.  The matched
    # xpath is reused below to collect attachment links.
    fulltext = None
    fulltext_xpath = '//div[@class="TRS_Editor"]'
    for candidate in ('//div[@id="zoom"]', '//div[@class="art_cont"]', '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    # The site has download-only pages where the container node exists but
    # holds no body text.
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99467'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZHUZHOU"
    zt_provider = "zhuzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment info (if any) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省湘潭市
def policy_xiangtanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Xiangtan (湖南省湘潭市) gov policy site.

    Determines the total page count, schedules the remaining list pages
    (offset-based, 15 records per page), and emits one next-stage row per
    article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total pages: the "x/N页" text in #pagination, else the last-but-one
        # pager link under jspIndex4.
        page_info_before = res.xpath("//div[@id='pagination']/span/text()").extract_first()
        if page_info_before:
            page_info = re.sub(r".*/(\d+)页", "\\1", page_info_before)
            total_page = int(page_info) if page_info else 1
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info) if page_info else 1
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the rest.  page_info carries a
            # record offset (15 per page), hence the exclusive upper bound.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page*15}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="s_list"]/li|//table[@class="table"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.xiangtan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99468'
            article_json["url"] = url
            # BUGFIX: extract_first() returns None for rows that lack the
            # expected cells; the original .strip() then raised
            # AttributeError.  Default to "" instead.
            title = li.xpath('td[2]/a/text()|a/text()').extract_first() or ''
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('td[3]/text()|span/text()').extract_first() or ''
            article_json["pub_date"] = pub_date_before.strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xiangtanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xiangtan: nothing to schedule, so an empty
    DealModel is returned."""
    return DealModel()


def policy_xiangtanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xiangtan (湖南省湘潭市) policy article pages.

    Parses title, metadata and the full text out of the fetched html, builds
    rows for policy_latest / policy_fulltext_latest, and writes attachment
    info back onto the crawl record.

    Raises:
        Exception: when no full-text container can be found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page heading, then the metadata table, then the list-page title.
    title = ''.join(res.xpath('//div[@class="ftheme"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(condition: str) -> str:
        # Metadata lives in spans under ewb-info-block formatted as
        # "<label>：<value>"; labels are matched character-by-character to
        # survive full-width padding.  Returns the value part.
        text = ''.join(res.xpath(
            f'//div[contains(@class,"ewb-info-block")]//span[{condition}]//text()').extract()).strip()
        return text.split('：')[-1].strip()

    pub_no = _meta('contains(string(),"文") and contains(string(),"号")')
    index_no = _meta('contains(string(),"索") and contains(string(),"引")')
    keyword = _meta('contains(string(),"主") and contains(string(),"词")')
    subject = _meta('contains(string(),"主") and contains(string(),"分") and contains(string(),"类")')
    written_date = _meta('contains(string(),"签") and contains(string(),"署") and contains(string(),"日")')
    legal_status = _meta('contains(string(),"时") and contains(string(),"效") and contains(string(),"性")')
    invalid_date = _meta('contains(string(),"期") and contains(string(),"效") and contains(string(),"时")')
    impl_date = _meta('contains(string(),"生") and contains(string(),"效") and contains(string(),"日")')
    organ = _meta('contains(string(),"发") and contains(string(),"机") and contains(string(),"构")')
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '湘潭' + organ

    # Full text: try the known containers in order.  BUGFIX: the original
    # re-extracted with the matched xpath a second time after the fallback
    # chain; this loop performs each extraction exactly once.
    fulltext = None
    fulltext_xpath = '//div[@class="TRS_Editor"]'
    for candidate in ('//div[@id="zoom"]', '//div[@class="content-main"]', '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    # The site has download-only pages where the container node exists but
    # holds no body text.
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99468'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XIANGTAN"
    zt_provider = "xiangtangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachments can live in the body container or in a "fjdown" block.
    fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省衡阳市
def policy_hengyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hengyang (湖南省衡阳市) gov policy site.

    Determines the total page count from the page text, schedules the
    remaining list pages, and emits one next-stage row per article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total pages: "共N页" text, else the createPageHTML("N", ...) script.
        page_info = re.findall(r'.*共(\d+)页.*', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first crawled page (index 1) schedules the rest.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"pages/{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The list markup varies by column; try the known layouts in order.
        li_list = res.xpath('//div[@class="list"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="gkgd-con"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="wjk-list"]/li')
        url_path = './a/@href'
        title_path = './a/p/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./p[contains(@class, "row2") or contains(@class, "biaoti")]/a/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./p[contains(@class, "row2") or contains(@class, "biaoti")]/a/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue
            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip())
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99469'
            # Resolve relative links; absolute urls (http.../www....) pass
            # through unchanged.
            if url.startswith("/"):
                url_before = "https://www.hengyang.gov.cn"
            elif "../" in url:
                url_before = "https://www.hengyang.gov.cn"
            elif "./" in url:
                url_before = "https://www.hengyang.gov.cn/" + callmodel.sql_model.list_rawid
            elif "http" in url:
                url_before = ""
            elif "www." in url:
                url_before = ""
            else:
                url_before = "https://www.hengyang.gov.cn/" + callmodel.sql_model.list_rawid + "/"
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./p[@class='row3']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hengyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hengyang: nothing to schedule, so an empty
    DealModel is returned."""
    return DealModel()


def policy_hengyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hengyang (湖南省衡阳市) policy article pages.

    Parses title, metadata and the full text out of the fetched html, builds
    rows for policy_latest / policy_fulltext_latest, and writes attachment
    info back onto the crawl record.

    Raises:
        Exception: when no full-text container can be found on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # No date from the list page: read the "发布时间" line on the page.
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"间")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Title: page heading, then the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata sits in a label-cell / value-cell table; labels are matched
    # character-by-character to survive full-width padding.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"有")]/following-sibling::td[1]//text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]/text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '衡阳' + organ

    # Full text: try the known containers in order.  BUGFIX: the original
    # re-extracted with the matched xpath a second time after the fallback
    # chain; this loop performs each extraction exactly once.
    fulltext = None
    fulltext_xpath = '//div[@class="TRS_Editor"]'
    for candidate in ('//div[@id="zoom"]', '//div[@class="content_wz"]', '//div[@class="TRS_Editor"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    # The site has download-only pages where the container node exists but
    # holds no body text.
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99469'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HENGYANG"
    zt_provider = "hengyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachments can live in the body container or in a "fjdown" block.
    fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省邵阳市
def policy_shaoyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shaoyang (湖南省邵阳市) gov policy site.

    Determines the total page count from the pagination script, schedules the
    remaining list pages, and emits one next-stage row per article link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total pages: createPageHTML('page_div',N,...) script, else the
        # createPageHTML("N", ...) variant.
        page_info = re.findall("createPageHTML\('page_div',(\d+),", html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first crawled page (index 1) schedules the rest; the
            # original column's page_info prefix is kept in each entry.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The list markup varies by column; try the known layouts in order.
        li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="file-conts"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="xbnewlists clearfix"]/li')
        url_path = './a/@href'
        title_path = './a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td[@class="articleTitle"]/a/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./td[@class="articleTitle"]/a/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip())
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99470'
            # Resolve relative links; every other form (absolute http/www
            # urls, bare names) is used as-is.
            if url.startswith("/"):
                url_before = "https://www.shaoyang.gov.cn"
            elif "../" in url:
                url_before = "https://www.shaoyang.gov.cn"
            elif "./" in url:
                url_before = "https://www.shaoyang.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="cont-time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[@class='articleDate']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span[@class='time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shaoyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shaoyang: no extra scheduling is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()


def policy_shaoyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shaoyang (邵阳) policy articles.

    Parses the article HTML downloaded by the crawl stage, extracts the
    policy metadata fields and the fulltext node, builds rows for the
    ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info back onto the source row.

    Raises:
        Exception: when none of the known fulltext containers is present
            (the site has download-only pages where the node is missing).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//td[contains(string(),"文") and contains(string(),"号")]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(string(),"索") and contains(string(),"引")]/text()').extract()).strip()
    keyword = ''.join(res.xpath('//td[contains(string(),"主") and contains(string(),"词")]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]/text()').extract()).strip()
    legal_status = ''.join(
        res.xpath('//td[contains(string(),"时") and contains(string(),"效")]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"有")]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"属") and contains(string(),"机") and contains(string(),"构")]/text()').extract()).strip()
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '邵阳' + organ

    # Try the known fulltext containers in priority order; keep the matching
    # xpath because get_file_info() needs it to locate attachments later.
    fulltext = None
    fulltext_xpath = '//div[@id="zoom"]'
    for candidate in ('//div[@id="zoom"]', '//div[@class="Section0"]', '//ucapcontent'):
        fulltext = res.xpath(candidate).extract_first()
        fulltext_xpath = candidate
        if fulltext:
            break
    # 该网站存在无正文, 仅有下载的情况, 只是节点依然存在
    if not fulltext:
        raise Exception("no fulltext node found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99470'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHAOYANG"
    zt_provider = "shaoyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省岳阳市
def policy_yueyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yueyang (岳阳) policy documents.

    On the first page (page_index == 0) it reads the total page count from
    the pager text and schedules the remaining list pages; it then extracts
    every article link on the current page and queues article-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # raw string avoids the invalid "\d" escape warning
            max_count = re.findall(r'> 共 (\d+) 页', para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # pages are 0-based on this site, so range(1, total_page) plus the
            # already-fetched page 0 covers all total_page pages
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page*15}", "url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news-list"]/li|//table[@class="table"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://yueyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99471'
            article_json["url"] = url
            # the <a> may have no direct text node; guard against None
            article_json["title"] = (li.xpath('td[2]/a/text()|a/text()').extract_first() or '').strip()
            article_json["pub_date"] = ''.join(li.xpath('td[4]/text()|a/span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yueyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yueyang: nothing to schedule here, so an
    empty DealModel is returned."""
    return DealModel()


def policy_yueyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yueyang (岳阳) policy articles.

    Parses the article HTML, extracts metadata fields and the fulltext node,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows, and
    records attachment info back onto the source row.

    Raises:
        Exception: when no publish date can be determined, or when none of
            the known fulltext containers is present (download-only pages).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    res = Selector(text=html)
    pub_year = pub_date[:4]

    if not pub_date:
        # fall back to the date shown in the article header
        pub_date_info = ''.join(res.xpath('//div[@class="wznr-mess-l"]/span[2]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception("no pub_date found: %s" % provider_url)
    title = ''.join(res.xpath('//h1[@class="content-title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//li/span[contains(string(),"文") and contains(string(),"号")]/text()').extract()).strip()
    index_no = ''.join(
        res.xpath('//li/span[contains(string(),"索") and contains(string(),"引")]/text()').extract()).strip()
    keyword = ''.join(
        res.xpath('//li/span[contains(string(),"主") and contains(string(),"词")]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li/span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li/span[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/text()').extract()).strip()
    legal_status = ''.join(
        res.xpath('//li/span[contains(string(),"时") and contains(string(),"效")]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath(
        '//li/span[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//li/span[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li/span[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '岳阳' + organ

    # Try the known fulltext containers in priority order; keep the matching
    # xpath because get_file_info() needs it to locate attachments later.
    fulltext = None
    fulltext_xpath = '//div[@id="zoom"]'
    for candidate in ('//div[@id="zoom"]', '//div[@class="Section0"]', '//ucapcontent'):
        fulltext = res.xpath(candidate).extract_first()
        fulltext_xpath = candidate
        if fulltext:
            break
    # 该网站存在无正文, 仅有下载的情况, 只是节点依然存在
    if not fulltext:
        raise Exception("no fulltext node found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99471'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YUEYANG"
    zt_provider = "yueyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省常德市
def policy_changdelist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Changde (常德) policy documents, layout 1.

    On page 1 it determines the total page count from the embedded pager
    script and schedules the remaining list pages; it then extracts every
    article link/title/date on the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_text = para_dicts["data"]["1_1"]['html']
        # raw strings avoid invalid "\d" escape warnings; fall back to the
        # createPageHTML pager when the pageCount variable is absent
        page_info = re.findall(r'pageCount:(\d+),', html_text)
        if not page_info:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html_text)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html_text)
        li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="file-conts"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="clearfix zcwjk-list-main"]/li')
        url_path = './p[@class="p2"]/a/@href'
        title_path = './p[@class="p2"]/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = ""
            if url is None:
                continue
            if 'htm' not in url:
                continue
            # rawid: last two path segments with the file extension stripped
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "").replace("?", "").replace(".htm", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99472'
            if url.startswith("/"):
                url_before = "https://zwgk.changde.gov.cn"
            elif "../" in url:
                url_before = "https://zwgk.changde.gov.cn"
            elif "./" in url:
                url_before = "https://zwgk.changde.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                # absolute (http / www.) links need no host prefix
                url_before = ""
            article_json["url"] = url_before + url.replace("../../", "/").replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="cont-time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./p[@class='p3']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changdelist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Changde (常德) policy documents, layout 2
    (``gk_list`` pages).

    On page 1 it determines the total page count from the embedded pager
    script and schedules the remaining list pages; it then extracts every
    article link/title/date on the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_text = para_dicts["data"]["1_1"]['html']
        # raw strings avoid invalid "\d" escape warnings; fall back to the
        # createPageHTML pager when the pageCount variable is absent
        page_info = re.findall(r'pageCount:(\d+),', html_text)
        if not page_info:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html_text)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html_text)
        li_list = res.xpath('//table[@class="table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="gk_list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="clearfix zcwjk-list-main"]/li')
        url_path = './div/a/@href'
        title_path = './div/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = ""
            if url is None:
                continue
            if 'htm' not in url:
                continue
            # rawid: last two path segments with the file extension stripped
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "").replace("?", "").replace(".htm", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99472'
            if url.startswith("/"):
                url_before = "https://zwgk.changde.gov.cn"
            elif "../" in url:
                url_before = "https://zwgk.changde.gov.cn"
            elif "./" in url:
                url_before = "https://zwgk.changde.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                # absolute (http / www.) links need no host prefix
                url_before = ""
            article_json["url"] = url_before + url.replace("../../", "/").replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./p[@class='p3']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changdelist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Changde (常德) policy documents, layout 3
    (``newsList`` pages on www.changde.gov.cn).

    On page 1 it determines the total page count from the pager text and
    schedules the remaining list pages; it then extracts every article
    link/title/date on the current page for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_text = para_dicts["data"]["1_1"]['html']
        # raw strings avoid invalid "\d" escape warnings; fall back to the
        # createPageHTML pager when the "共N页" text is absent
        page_info = re.findall(r'.*共(\d+)页.*', html_text)
        if not page_info:
            page_info = re.findall(r'createPageHTML.*?\(\"(\d+)\",.*', html_text)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html_text)
        li_list = res.xpath('//table[@class="newsList"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="file-conts"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="newsList"]/li')
        url_path = './a/@href'
        title_path = './a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # table rows carry the title in the @title attribute
                title = li.xpath('./a/@title').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./td[@class="txt-left"]/a/@href').get()
                if url is None:
                    continue
            # rawid: last two path segments with the file extension stripped
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99472'
            if url.startswith("/"):
                url_before = "https://www.changde.gov.cn"
            elif "../" in url:
                url_before = "https://www.changde.gov.cn"
            elif "./" in url:
                url_before = "https://www.changde.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                # absolute (http / www.) links need no host prefix
                url_before = ""
            article_json["url"] = url_before + url.replace("../../", "/").replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[4]/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changdearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changde: nothing to schedule here, so an
    empty DealModel is returned."""
    return DealModel()


def policy_changdearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Changde (常德) policy articles.

    Parses the article HTML, extracts the metadata table cells and the
    fulltext node, builds the ``policy_latest`` / ``policy_fulltext_latest``
    rows, and records attachment info back onto the source row.

    Raises:
        Exception: when none of the known fulltext containers is present
            (the site has download-only pages where the node is missing).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _cell_text(xpath_expr):
        """Return the first text matched by *xpath_expr*, stripped; '' when absent."""
        value = res.xpath(xpath_expr).extract_first()
        return (value or "").strip()

    pub_no = _cell_text(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()')
    index_no = _cell_text(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()')
    written_date = _cell_text(
        '//th[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()')
    organ = _cell_text(
        '//th[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]//text()')

    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '常德' + organ

    # Try the known fulltext containers in priority order; keep the matching
    # xpath because get_file_info() needs it to locate attachments later.
    fulltext = None
    fulltext_xpath = '//div[@class="xxgk-wzcon"]'
    for candidate in ('//div[@class="xxgk-wzcon"]', '//div[@class="conTxt"]',
                      '//div[contains(@class, "j-fontContent")]'):
        fulltext = res.xpath(candidate).extract_first()
        fulltext_xpath = candidate
        if fulltext:
            break
    # 该网站存在无正文, 仅有下载的情况, 只是节点依然存在
    if not fulltext:
        raise Exception("no fulltext node found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99472'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHANGDE"
    zt_provider = "changdegovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省张家界市
def policy_zjjlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhangjiajie (www.zjj.gov.cn) policy list page.

    On the first page, queues list tasks for every remaining page; on every
    page, extracts article links/titles/dates and queues them as next-stage
    (article) tasks.

    :param callmodel: framework callback model carrying the fetched html in
        ``para_dicts["data"]["1_1"]`` plus the originating sql row.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count: try the "共N页" text first, then createPageHTML("N", ...).
        page_info = re.findall(r'.*共(\d+)页.*', html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = re.findall(r'createPageHTML.*?\("(\d+)",.*', html)
            total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: queue follow-up list tasks for all remaining pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"pages/{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try them in order.
        li_list = res.xpath('//div[@class="bd_new bd_a80 right_list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="gkgd-con"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@id="ul"]/li')
        url_path = './/a/@href'
        title_path = './/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = ""
            if url is None:
                url = li.xpath('./a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            # rawid: last two path segments joined, with the html suffix stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                tail = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(
                    ".html", "").replace(".htm", "").strip()
                rawid = "{}_{}".format(rawid_list[-2], tail)
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(
                    ".htm", "").replace("?", "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99473'
            # Absolute-ize relative links; full urls (http/www.) get no prefix.
            if url.startswith("/"):
                url_before = "http://www.zjj.gov.cn"
            elif "../" in url:
                url_before = "http://www.zjj.gov.cn"
            elif "./" in url:
                url_before = "http://www.zjj.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get() or li.xpath("./em/text()").get() or ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjjlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Zhangjiajie NDRC (fgw.zjj.gov.cn) policy list page.

    The site exposes no usable page counter, so a fixed window of 10 pages
    is queued from page 1; every page's article links/titles/dates are
    queued as next-stage (article) tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        total_page = 10  # no page count available on the page; crawl a fixed window
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: queue follow-up list tasks for pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"pages/{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # The site uses several list layouts; try them in order.
        li_list = res.xpath('//div[@class="bd_new bd_a80 right_list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="gkgd-con"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="clear"]/li')
        url_path = './/a/@href'
        title_path = './/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('.//a//text()').get()
            if title is None:
                title = ""
            if url is None:
                url = li.xpath('./a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            # rawid: last two path segments joined, with the html suffix stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                tail = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(
                    ".html", "").replace(".htm", "").strip()
                rawid = "{}_{}".format(rawid_list[-2], tail)
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(
                    ".htm", "").replace("?", "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99473'
            # Absolute-ize relative links; full urls (http/www.) get no prefix.
            if url.startswith("/"):
                url_before = "http://fgw.zjj.gov.cn"
            elif "../" in url:
                url_before = "http://fgw.zjj.gov.cn"
            elif "./" in url:
                url_before = "http://fgw.zjj.gov.cn/" + callmodel.sql_model.list_rawid
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./i/text()').get() or li.xpath("./em/text()").get() or ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Zhangjiajie: nothing to queue, return an empty DealModel."""
    return DealModel()


def policy_zjjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Zhangjiajie policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows.

    Extracts title, document/index numbers, dates, organ and full text from
    the fetched html, builds the save-data rows, and writes attachment info
    (``other_dicts``) back onto the originating task row.

    :raises Exception: when no full-text node can be located (the site has
        download-only pages where the container node is empty).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: several page layouts, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="leader-name"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath(
            '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields are located by the characters of their Chinese labels.
    pub_no = ''.join(res.xpath('//li[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//li[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//li[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()
    legal_status = ''.join(
        res.xpath(
            '//li[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]//text()').extract()).strip()
    invalid_date = ''.join(
        res.xpath(
            '//li[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]//text()').extract()).strip()
    impl_date = ''.join(
        res.xpath(
            '//li[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//li[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '张家界' + organ

    def clean_only_this(text):
        # Keep only the text after the last ']' (strips the "[label]" prefix).
        if text is not None:
            text = re.sub(r".*[]](.*)", r"\1", text)
            return text.strip()
        return ""

    # Full text: try each known container; assign inside each fallback so we
    # never re-query needlessly.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="leader16-gr"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="te_content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages: the node exists but holds no body text.
    if not fulltext:
        raise Exception("no fulltext found for rawid %s" % callmodel.sql_model.rawid)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99473'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJJ"
    zt_provider = "zjjgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_only_this(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachments may live in the body or in the dedicated download box.
    fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    file_info = get_file_info(data, res, f'({fulltext_xpath})') + get_file_info(data, res, f'({fulltext_xpath2})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省益阳市
def policy_yiyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Yiyang (www.yiyang.gov.cn) policy list page.

    On page 0, reads the total page count from the pager javascript and
    queues the remaining list pages (``page_info`` is a 15-rows-per-page
    record offset); on every page, extracts article links/titles/dates and
    queues them as next-stage (article) tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = re.findall(r"default.jsp',(\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page_info carries the record offset, not the page number.
                dic = {"page_info": f"{page * 15}", "url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three known list layouts, matched in one union xpath.
        li_list = res.xpath('//div[@class="gkmus2-1"]/ul/li|//div[@class="tl_rg_con"]/ul/li|//div[@class="gfwj_table"]/table/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.yiyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid: filename without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99474'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None here and raise on
            # .strip(); presumably the framework treats that as a failed task — confirm.
            article_json["title"] = li.xpath('td[2]/a/text()|a/font/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = ''.join(li.xpath('td[5]/text()|span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yiyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Yiyang: nothing to queue, return an empty DealModel."""
    return DealModel()


def policy_yiyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Yiyang policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows.

    Extracts title, document/index numbers, dates, organ and full text from
    the fetched html, builds the save-data rows, and writes attachment info
    (``other_dicts``) back onto the originating task row.

    :raises Exception: when no full-text node can be located (the site has
        download-only pages where the container node is empty).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    res = Selector(text=html)
    pub_year = pub_date[:4]

    def _meta(xpath):
        # Join all matched text and keep the value after the full-width
        # colon label, e.g. "发文字号：X" -> "X".
        text = ''.join(res.xpath(xpath).extract()).strip()
        return text.split('：')[-1].strip()

    title = ''.join(res.xpath('//div[@class="xl_title"]/h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields are located by the characters of their Chinese labels.
    pub_no = _meta('//div[@class="ptxl_title"]//span[contains(string(),"文") and contains(string(),"号")]/text()')
    index_no = _meta('//div[@class="ptxl_title"]//span[contains(string(),"索") and contains(string(),"引")]/text()')
    keyword = _meta('//div[@class="ptxl_title"]//span[contains(string(),"主") and contains(string(),"词")]/text()')
    subject = _meta('//div[@class="ptxl_title"]//span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()')
    written_date = _meta('//div[@class="ptxl_title"]//span[contains(string(),"登") and contains(string(),"记") and contains(string(),"日")]/text()')
    legal_status = _meta('//div[@class="ptxl_title"]//span[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]/text()')
    invalid_date = _meta('//div[@class="ptxl_title"]//span[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/text()')
    impl_date = _meta('//div[@class="ptxl_title"]//span[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/text()')
    organ = _meta('//div[@class="ptxl_title"]//span[contains(string(),"属") and contains(string(),"机") and contains(string(),"构")]/text()')
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '益阳' + organ

    # Full text: try each known container; assign inside each fallback so we
    # never re-query needlessly.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="ptxl_text"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//ucapcontent'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages: the node exists but holds no body text.
    if not fulltext:
        raise Exception("no fulltext found for rawid %s" % callmodel.sql_model.rawid)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99474'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YIYANG"
    zt_provider = "yiyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省郴州市
def policy_czslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Chenzhou (www.czs.gov.cn) policy list page.

    On page 0, reads the total page count from the pager javascript and
    queues the remaining list pages (``page_info`` is a 15-rows-per-page
    record offset); on every page, extracts article links/titles/dates and
    queues them as next-stage (article) tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = re.findall(r"default.jsp',(\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page_info carries the record offset, not the page number.
                dic = {"page_info": f"{page * 15}", "url_part": list_json['url_part']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts, matched in one union xpath.
        li_list = res.xpath('//div[@class="yaowennr"]/ul/li|//div[contains(@class,"wjcon-sxy-con")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            if not href:
                continue
            base_url = f'https://www.czs.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid: filename without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99475'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None here and raise on
            # .strip(); presumably the framework treats that as a failed task — confirm.
            article_json["title"] = li.xpath('a[1]/i/text()|a[1]/text()').extract_first().strip()
            # The regulatory-documents column places the date in <em>, others in <span>.
            if 'html/zwgk/fggw/gfxwj' in callmodel.sql_model.list_rawid:
                article_json["pub_date"] = ''.join(li.xpath('em/text()').extract()).strip()
            else:
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Chenzhou: nothing to queue, return an empty DealModel."""
    return DealModel()


def policy_czsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Chenzhou policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows.

    Extracts title, document/index numbers, dates, organ and full text from
    the fetched html, builds the save-data rows, and writes attachment info
    (``other_dicts``) back onto the originating task row.

    :raises Exception: when no full-text node can be located (the site has
        download-only pages where the container node is empty).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    res = Selector(text=html)
    pub_year = pub_date[:4]

    def _meta(xpath):
        # Join all matched text and keep the value after the full-width
        # colon label, e.g. "发文字号：X" -> "X".
        text = ''.join(res.xpath(xpath).extract()).strip()
        return text.split('：')[-1].strip()

    title = ''.join(res.xpath('//div[@class="zhengcebiaoti"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata fields are located by the characters of their Chinese labels.
    pub_no = _meta('//li[contains(string(),"文") and contains(string(),"号")]/text()')
    index_no = _meta('//li[contains(string(),"索") and contains(string(),"引")]/text()')
    keyword = _meta('//li[contains(string(),"主") and contains(string(),"词")]/text()')
    subject = _meta('//li[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()')
    written_date = _meta('//li[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]/text()')
    legal_status = _meta('//li[contains(string(),"时") and contains(string(),"效")]/text()')
    invalid_date = _meta('//li[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/text()')
    impl_date = _meta('//li[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/text()')
    organ = _meta('//li[contains(string(),"属") and contains(string(),"机") and contains(string(),"构")]/text()')
    organ = clean_organ(organ)
    if organ.startswith('市'):
        organ = '郴州' + organ

    # Full text: try each known container; assign inside each fallback so we
    # never re-query needlessly.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="ptxl_text"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//ucapcontent'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages: the node exists but holds no body text.
    if not fulltext:
        raise Exception("no fulltext found for rawid %s" % callmodel.sql_model.rawid)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99475'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZS"
    zt_provider = "czsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省永州市
def policy_yzcitylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.yzcity.gov.cn policy listings.

    Reads the rendered list HTML from ``para_dicts["data"]["1_1"]``, determines the
    total number of list pages, enqueues the remaining pages (only while handling
    page 1, so pagination fans out exactly once), then extracts one url/title/
    pub_date record per article row and queues it for the next task stage.

    NOTE(review): the section comment above says 郴州 (Chenzhou) but the domain
    yzcity.gov.cn looks like the Yongzhou city site — confirm.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Fields copied onto every queued row; "task_tag_next" is promoted to
    # "task_tag" for the article-stage rows created in the loop below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count: first try the createPageHTML(...) JS call embedded in the
        # page source, then the pager element, otherwise assume a single page.
        page_info = re.findall("createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        if page_info:
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest, so each list page is
            # inserted exactly once (insert-ignore guards against duplicates).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of a channel is addressed as "<page_info>_<N>".
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # The site uses several list layouts; probe them in order.
        li_list = res.xpath('//div[@class="bmwj_table"]/table/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="list_right"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "xbnewlists")]/li')
        url_path = './td/a[1]/@href'
        title_path = './td/a[1]/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Non-table layouts put the anchor directly under the <li>.
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                # Skip non-article links (downloads, external pages, etc.).
                continue
            # rawid = "<parent-dir>_<basename>" with the .s?html extension and
            # any query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99476'

            # Resolve relative links against the list page's own URL.
            url_before = f"http://www.yzcity.gov.cn/cnyz/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./td[3]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yzcityarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for yzcity: all extraction happens in the ETL callback."""
    return DealModel()


def policy_yzcityarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.yzcity.gov.cn policy articles.

    Extracts the metadata table (document number, index number, keywords, …) and
    the full text from the article HTML, builds the ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and writes attachment info back to the
    source row via ``other_dicts``.

    Raises:
        Exception: when no recognised full-text container is found, so the
            platform records the row as a failed parse.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]
    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata cells are located by the characters of their Chinese labels
    # (e.g. "文" + "号" = document number) to tolerate whitespace variants.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # NOTE(review): prefixes 郴州 (Chenzhou), but yzcity.gov.cn appears to be
        # the Yongzhou site — confirm this is not a copy-paste from the
        # Chenzhou (CZS) callback.
        organ = '郴州' + organ

    # Try the known full-text containers in order, keeping the xpath that
    # matched so the attachment scan below is scoped to the same node.
    fulltext = None
    fulltext_xpath = '//ucapcontent'
    for candidate in ('//div[@id="content"]', '//div[@id="zoomcon"]', '//ucapcontent'):
        fulltext_xpath = candidate
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    # The site publishes download-only pages: the metadata node exists but no
    # recognised full-text container does. Treat those as parse failures.
    if not fulltext:
        raise Exception(f"no fulltext node found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99476'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YZCITY"
    zt_provider = "yzcitygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links found inside the matched full-text node; stored on the
    # source row so the download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省怀化市
def policy_huaihualist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.huaihua.gov.cn policy listings.

    Reads the rendered list HTML from ``para_dicts["data"]["1_1"]``, determines
    the total page count, enqueues the remaining list pages (only while handling
    page 1, so pagination fans out exactly once), then extracts one url/title/
    pub_date record per article row and queues it for the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Fields copied onto every queued row; "task_tag_next" is promoted to
    # "task_tag" for the article-stage rows created in the loop below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count: first try the createPageHTML(...) JS call embedded in the
        # page source, then the pager element, otherwise assume a single page.
        page_info = re.findall("createPageHTML\('paging',(\d+),", para_dicts["data"]["1_1"]['html'])

        if page_info:

            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest, so each list page is
            # inserted exactly once (insert-ignore guards against duplicates).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of a channel is addressed as "<page_info>_<N>".
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//div[contains(@class, "xbnewlists")]/ul/li')
        # The site uses several list layouts; probe them in order.
        li_list = res.xpath('//div[@class="table_responsive"]/table/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="box"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "newsList")]/li')
        url_path = './td[@class="td2"]/a[1]/@href'
        title_path = './td[@class="td2"]/a[1]/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Non-table layouts put the anchor directly under the <li>.
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                # Skip non-article links (downloads, external pages, etc.).
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # rawid = "<parent-dir>_<basename>" with the .s?html extension and
            # any query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99477'

            # Resolve relative links against the list page's own URL.
            url_before = f"https://www.huaihua.gov.cn/huaihua/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[4]/span[@class="wjk-fwrq"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huaihuaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for huaihua: all extraction happens in the ETL callback."""
    return DealModel()


def policy_huaihuaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.huaihua.gov.cn policy articles.

    Extracts the metadata labels (document number, index number, keywords, …)
    and the full text from the article HTML, builds the ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and writes attachment info back to the
    source row via ``other_dicts``.

    Raises:
        Exception: when no recognised full-text container is found, so the
            platform records the row as a failed parse.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page <h2>, then the sp_title heading, then the list-page title.
    title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h4[@class="sp_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata values are located by the characters of their Chinese labels
    # inside <font> elements (e.g. "文" + "号" = document number) to tolerate
    # whitespace variants.
    pub_no = ''.join(res.xpath('//font[contains(string(),"文") and contains(string(),"号")]/text()').extract()).strip()

    index_no = ''.join(res.xpath('//font[contains(string(),"索") and contains(string(),"引")]/text()').extract()).strip()
    keyword = ''.join(res.xpath('//font[contains(string(),"主") and contains(string(),"词")]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//font[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//font[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//font[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//font[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//font[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//font[contains(string(),"属") and contains(string(),"机") and contains(string(),"构")]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name (怀化 = Huaihua).
        organ = '怀化' + organ

    # Try the known full-text containers in order, keeping the xpath that
    # matched so the attachment scan below is scoped to the same node.
    fulltext = None
    fulltext_xpath = '//ucapcontent'
    for candidate in ('//div[@class="content"]', '//div[@id="zoomcon"]', '//ucapcontent'):
        fulltext_xpath = candidate
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    # The site publishes download-only pages: the metadata node exists but no
    # recognised full-text container does. Treat those as parse failures.
    if not fulltext:
        raise Exception(f"no fulltext node found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99477'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUAIHUA"
    zt_provider = "huaihuagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links found inside the matched full-text node; stored on the
    # source row so the download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省娄底市
def policy_hnloudilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.hnloudi.gov.cn (Loudi) JSON-style listings.

    The response body is a JS-style object literal rather than strict JSON, so
    it is repaired with regexes before parsing. Enqueues the remaining list
    pages (only while on page 1) and one next-stage task per article entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Fields copied onto every queued row; "task_tag_next" is promoted to
    # "task_tag" for the article-stage rows created in the loop below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # page_info = re.findall("createPageHTML\('paging',(\d+),", para_dicts["data"]["1_1"]['html'])
        # Quote bare object keys after "{" and ",", then undo the cases where
        # the key was already quoted, so json.loads can parse the payload.
        # NOTE(review): fragile — assumes no ":" appears inside string values;
        # confirm against the actual endpoint response.
        json_format_1 = re.sub("\{(.*?):", "{\"\\1\":", para_dicts["data"]["1_1"]['html'])

        json_format_2 = re.sub(",(.*?):", ",\"\\1\":", json_format_1)

        json_format_3 = json_format_2.replace('"{"', '{"').replace('"":', '":').replace('{"}', "{}")
        all_data = json.loads(json_format_3)
        sub_data = all_data["data"]

        # The endpoint reports the total page count directly.
        total_page = sub_data["pages"]
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest, so each list page is
            # inserted exactly once (insert-ignore guards against duplicates).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of a channel is addressed as "<page_info>_<N>".
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = sub_data["list"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["URL"]
            title = li["TITLE"]
            if url is None:
                continue
            elif 'htm' not in url:
                # Skip non-article links (downloads, external pages, etc.).
                continue

            # url_before = "http://www.sanya.gov.cn"
            # rawid = "<parent-dir>_<basename>" with the .s?html extension and
            # any query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99478'
            # Resolve relative links against a fixed list-page URL.
            url_before = f"http://www.hnloudi.gov.cn/loudi/0402/wjklist.shtml"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["PUB_TIME"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_hnloudilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.hnloudi.gov.cn (Loudi) HTML listings.

    Reads the rendered list HTML from ``para_dicts["data"]["1_1"]``, determines
    the total page count, enqueues the remaining list pages (only while handling
    page 1, so pagination fans out exactly once), then extracts one url/title/
    pub_date record per article row and queues it for the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Fields copied onto every queued row; "task_tag_next" is promoted to
    # "task_tag" for the article-stage rows created in the loop below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count: first try the createPageHTML(...) JS call embedded in the
        # page source, then the pager element, otherwise assume a single page.
        page_info = re.findall("createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])

        if page_info:

            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest, so each list page is
            # inserted exactly once (insert-ignore guards against duplicates).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of a channel is addressed as "<page_info>_<N>".
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//div[contains(@class, "xbnewlists")]/ul/li')
        # The site uses several list layouts; probe them in order.
        li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/ul/li/h4')
        if not li_list:
            li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/li/h4')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "news-list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                # Skip non-article links (downloads, external pages, etc.).
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # rawid = "<parent-dir>_<basename>" with the .s?html extension and
            # any query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99478'
            # Resolve relative links against the list page's own URL.
            url_before = f"http://www.hnloudi.gov.cn/loudi/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.html"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[4]/span[@class="wjk-fwrq"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hnloudiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for hnloudi: all extraction happens in the ETL callback."""
    return DealModel()


def policy_hnloudiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Loudi (Hunan) municipal policy article pages.

    Parses the downloaded article HTML, extracts policy metadata (title,
    document number, index number, subject, keyword, dates, issuing organ)
    and the full-text node, then queues one row each for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables.  Attachment
    info found inside the full-text node is written back onto the article
    row via ``other_dicts``.

    Raises:
        Exception: when no full-text node can be located (the site has
            download-only pages whose content node is absent).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)

    pub_year = pub_date[:4]
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="content-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()

    # Metadata cells are rendered as "<label>：<value>"; keep the value part only.
    pub_no = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"索") and contains(string(),"号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    keyword = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()
    keyword = keyword.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"成文日期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    # BUG FIX: this xpath previously began with a single '/' ('/div[...]'),
    # which only matches a document-root <div> and therefore never found the
    # legal-status span; use '//' like every sibling field above.
    legal_status = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"时") and contains(string(),"性")]//text()').extract()).strip()
    legal_status = legal_status.split('：')[-1].strip()
    invalid_date = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"失效日期")]//text()').extract()).strip()
    invalid_date = invalid_date.split('：')[-1].strip()
    impl_date = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"生效日期")]//text()').extract()).strip()
    impl_date = impl_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"所属机构")]//text()').extract()).strip()
    if not organ:
        organ = ''.join(res.xpath('//div[contains(@class,"xxgk-content-attr")]//span[contains(string(),"起草单位")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '娄底' + organ

    # Try the known content containers in order of preference.
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoomcon"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//ucapcontent'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages where no content node is present.
    if not fulltext:
        raise Exception('no full-text node found for rawid %s' % callmodel.sql_model.rawid)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99478'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HNLOUDI"
    zt_provider = "hnloudigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links inside the full-text node, if any.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖南省湘西土家族苗族自治州
def policy_xxzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xiangxi Tujia & Miao Autonomous Prefecture (Hunan).

    Two responsibilities:
      * on the first page (``page_index == 0``) detect the total page count
        and queue the remaining list pages as "before" inserts;
      * parse each article entry (url/title/pub_date) on the current page,
        derive a stable rawid from the url path, and queue the article rows
        as "next" inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count is usually embedded in inline JS as: pageCount = "N";
        page_info = re.findall("pageCount = \"(\d+)\";", para_dicts["data"]["1_1"]['html'])

        if page_info:

            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            # Fallback: read the last-but-one pager link text as the page count.
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: queue the remaining list pages for crawling.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Several list layouts exist on this site; try each container in turn.
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//div[contains(@class, "xbnewlists")]/ul/li')
        li_list = res.xpath('//div[@class="xxgk-item"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="xxgk-list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "newsList")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows run under the next task tag in the pipeline.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages, etc.).
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # Build a stable rawid from the last two path segments of the url,
            # with the file extension and query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99479'

            # Resolve relative article links against the list-page url.
            url_before = f"http://www.xxz.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.html"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[4]/span[@class="wjk-fwrq"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xxzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xiangxi prefecture policy pages; no extra work is needed here."""
    return DealModel()


def policy_xxzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xiangxi prefecture (Hunan) policy article pages.

    Extracts policy metadata and the full-text node from the downloaded
    HTML, queues rows for ``policy_latest`` and ``policy_fulltext_latest``,
    and writes attachment info back onto the article row via ``other_dicts``.

    Raises:
        Exception: when no full-text node can be located (the site has
            download-only pages whose content node is absent).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    # if not pub_date:
    #     # pub_date = ''.join(res.xpath(
    #     # '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    #     pub_date = ''.join(res.xpath('//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"间")]/text()').extract()).strip()
    #     pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//h1/text()').extract()).strip()

    # if not title:
    #     # title = ''.join(res.xpath('//div[@class="three-page-title navBlock"]/text()').extract()).strip()
    #     title = ''.join(res.xpath(
    #         '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h4[@class="sp_title"]/text()').extract()).strip()

    # else:
    #     fulltext_xpath = '//div[@id="zoom"]'
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Metadata spans live inside the div.wjxx info box; each field is located
    # by the characters of its Chinese label.
    pub_no = ''.join(res.xpath('//div[@class="wjxx"]//span[contains(string(),"文") and contains(string(),"号")]//text()').extract()).strip()

    index_no = ''.join(res.xpath('//div[@class="wjxx"]//span[contains(string(),"索") and contains(string(),"引")]//text()').extract()).strip()
    keyword = ''.join(res.xpath('//div[@class="wjxx"]//span[contains(string(),"主") and contains(string(),"词")]//text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]//text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"签") and contains(string(),"署") and contains(string(),"日")]//text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"时") and contains(string(),"效") and contains(string(),"性")]//text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"期") and contains(string(),"效") and contains(string(),"有")]//text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]//text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[@class="wjxx"]//span[contains(string(),"属") and contains(string(),"机") and contains(string(),"构")]//text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('州'):
        # Qualify bare "州..." organ names with the prefecture name.
        organ = '湘西' + organ

    # Try the known content containers in order of preference.
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext_xpath = '//ucapcontent'
        # fulltext_xpath = '//ucapcontent'

        # fulltext_xpath2 = '//div[@class="is-downlist"]'
        # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages where no content node is present.
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99479'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XXZ"
    zt_provider = "xxzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath = '//div[contains(@class, "left_conter_20190901")]'
    # fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    # Collect attachment links inside the full-text node, if any.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 湖北省农业农村厅
def policy_nythubeilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Hubei Department of Agriculture (JSON API variant).

    The list endpoint returns JSON rather than HTML; each entry carries
    URL / FILENAME / PUBDATE fields.  Article rows are queued as "next"
    inserts with a rawid derived from the url path.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # page_info = re.findall("createPageHTML\('paging',(\d+),", para_dicts["data"]["1_1"]['html'])

        all_data = json.loads(para_dicts["data"]["1_1"]['html'])

        # NOTE: this endpoint returns up to 1000 records at once, so paging is
        # ignored and a single page is assumed; if more than 1000 records are
        # expected, use the second (HTML-paged) variant instead.
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows run under the next task tag in the pipeline.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["URL"]
            title = li["FILENAME"]
            if url is None:
                continue
            elif 'htm' not in url:
                # Skip non-article links (attachments, external pages, etc.).
                continue

            # url_before = "http://www.sanya.gov.cn"
            # Build a stable rawid from the last two path segments of the url,
            # with the file extension and query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99440'
            # Resolve relative article links against the JSON endpoint url.
            url_before = f"http://nyt.hubei.gov.cn/zfxxgk/zc_GK2020/{callmodel.sql_model.list_rawid}.json"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["PUBDATE"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_nythubeilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Hubei Department of Agriculture (HTML-paged variant).

    On the first page (``page_index == 0``) it detects the total page count
    and queues the remaining list pages as "before" inserts; it then parses
    each article entry on the current page and queues the article rows as
    "next" inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Page count is usually embedded in inline JS as: createPageHTML(N,...
        page_info = re.findall("createPageHTML\((\d+),", para_dicts["data"]["1_1"]['html'])

        if page_info:

            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            # Fallback: read the last-but-one pager link text as the page count.
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: queue the remaining list pages for crawling.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//div[contains(@class, "xbnewlists")]/ul/li')
        # li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/ul/li/h4')
        # if not li_list:
        #     li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/li/h4')
        # if not li_list:
        li_list = res.xpath('//ul[contains(@class, "info-list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Article rows run under the next task tag in the pipeline.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages, etc.).
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # Build a stable rawid from the last two path segments of the url,
            # with the file extension and query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99440'
            # Resolve relative article links against the list-page url.
            url_before = f"http://nyt.hubei.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nythubeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hubei agriculture-department policy pages; no extra work is needed here."""
    return DealModel()


def policy_nythubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hubei Department of Agriculture policy article pages.

    Extracts policy metadata from the page's attribute table (``th`` label /
    ``td`` value pairs) plus the full-text node, queues rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back onto the article row via ``other_dicts``.

    Raises:
        Exception: when no full-text node can be located (the site has
            download-only pages whose content node is absent).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    # if not pub_date:
    #     # pub_date = ''.join(res.xpath(
    #     # '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    #     pub_date = ''.join(res.xpath('//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"间")]/text()').extract()).strip()
    #     pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//div[@class="article"]/h2//text()').extract()).strip()

    # if not title:
    #     # title = ''.join(res.xpath('//div[@class="three-page-title navBlock"]/text()').extract()).strip()
    #     title = ''.join(res.xpath(
    #         '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    # if not title:
    #     title = ''.join(res.xpath('//p[@class="gz_title"]//text()').extract()).strip()

    # else:
    #     fulltext_xpath = '//div[@id="zoom"]'
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Each metadata value sits in the <td> following a <th> whose text
    # contains the characters of the corresponding Chinese label.
    pub_no = ''.join(res.xpath(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//th[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//th[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//th[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//th[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//th[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province name.
        organ = '湖北' + organ

    # Try the known content containers in order of preference.
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zwnr"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext_xpath = '//div[@class="article"]'
        # fulltext_xpath = '//ucapcontent'

        # fulltext_xpath2 = '//div[@class="is-downlist"]'
        # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    fulltext = res.xpath(fulltext_xpath).extract_first()
    # The site has download-only pages where no content node is present.
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99440'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYTHUBEI"
    zt_provider = "nythubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath = '//div[contains(@class, "left_conter_20190901")]'
    # fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    # Collect attachment links inside the full-text node, if any.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 湖北省黄石市
def policy_huangshilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshi (Hubei) government policy pages.

    Parses the rendered list HTML, determines the total page count,
    fans out rows for the remaining list pages (only while processing the
    first page, i.e. page_index == 0) and emits one article-level task row
    per usable list entry.

    :param callmodel: task context carrying the fetched HTML and the source row.
    :return: DealModel with pagination rows in befor_dicts and article rows
             in next_dicts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total page count is embedded in a createPageHTML(<n>, ...) JS call;
        # fall back to the pager widget, then default to a single page.
        page_info = re.findall(r"createPageHTML\((\d+),", html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of this site uses an "index_N" path segment.
                dic = {"page_info": f"{page_info}".replace('index', f'index_{page}')}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="newList"]/ul/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # No @title attribute; use the anchor text instead.
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                # url_path already queried './a[1]/@href'; nothing more to try.
                continue
            if 'htm' not in url:
                continue
            # rawid: "<parent-dir>_<basename>" with extension/query stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "")
                    .replace(".htm", "").replace("?", "").strip())
            else:
                rawid = (rawid_list[-1].replace(".shtml", "").replace("?", "")
                         .replace(".html", "").replace(".htm", "").strip())
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99440'
            url_before = f"http://www.huangshi.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huangshi; no extra processing is required."""
    return DealModel()


def policy_huangshiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huangshi (Hubei) policy article pages.

    Extracts policy metadata and the full text from the article HTML, queues
    the policy_latest / policy_fulltext_latest rows, and writes attachment
    info (if any) back onto the source row via an update.

    :param callmodel: task context carrying the fetched HTML and the source row.
    :raises Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]
    title = ''.join(res.xpath('//div[@id="title"]/h1//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata fields are matched by individual characters of their Chinese
    # labels so minor label variations between pages still match.
    pub_no = ''.join(res.xpath(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//th[contains(string(),"分") and contains(string(),"类") and contains(string(),"主")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//th[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//th[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//th[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '黄石' + organ

    # Try the main article container first, then the plain content div.
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages carry only attachments and no body text, but the container
    # node normally still exists; a missing node is abnormal.
    if not fulltext:
        raise Exception(f"no fulltext node found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99444'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUANGSHI"
    zt_provider = "huangshigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment info on the source row so downloads can be scheduled.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省襄阳市
def policy_xiangyanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Xiangyang (Hubei): JSON list endpoint.

    The endpoint returns up to 1000 entries in one response, so pagination is
    ignored (treated as a single page); every usable entry becomes one
    article-level task row.
    """

    def build_rawid(link: str) -> str:
        # Derive a stable rawid from the URL tail, stripping extension/query.
        parts = link.split('/')
        if len(parts) > 1:
            tail = parts[-1].replace(".shtml", "").replace(".html", "").replace(
                ".htm", "").replace("?", "").strip()
            return "{}_{}".format(parts[-2], tail)
        return parts[-1].replace(".shtml", "").replace("?", "").replace(
            ".html", "").replace(".htm", "").strip()

    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    base_url = f"http://xxgk.xiangyang.gov.cn/{callmodel.sql_model.list_rawid}.json"
    for entry in payload["data"]:
        link = entry["url"]
        title = entry["DOCTITLE"]
        if link is None or 'htm' not in link:
            continue
        row = base_info.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = build_rawid(link)
        row["sub_db_id"] = '99445'
        article_json = {
            "url": parse.urljoin(base_url, link),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                entry["PubDate"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)
    return result


def policy_xiangyanglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xiangyang (Hubei) HTML list pages.

    Determines the total page count from the page HTML, fans out rows for the
    remaining list pages (only while processing the first page) and emits one
    article-level task row per usable list entry.

    :param callmodel: task context carrying the fetched HTML and the source row.
    :return: DealModel with pagination rows in befor_dicts and article rows
             in next_dicts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total page count comes from a createPageHTML(<n>, ...) JS call;
        # fall back to the pager widget, then default to a single page.
        page_info = re.findall(r"createPageHTML\((\d+),", html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of this site uses a "<name>_N" path segment.
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class, "list-t")]/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "info-list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # No @title attribute; use the anchor text instead.
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                # url_path already queried './a[1]/@href'; nothing more to try.
                continue
            if 'htm' not in url:
                continue
            # rawid: "<parent-dir>_<basename>" with extension/query stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "")
                    .replace(".htm", "").replace("?", "").strip())
            else:
                rawid = (rawid_list[-1].replace(".shtml", "").replace("?", "")
                         .replace(".html", "").replace(".htm", "").strip())
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99445'
            url_before = f"http://{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xiangyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xiangyang; no extra processing is required."""
    return DealModel()


def policy_xiangyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xiangyang (Hubei) policy article pages.

    Extracts policy metadata and the full text from the article HTML, queues
    the policy_latest / policy_fulltext_latest rows, and writes attachment
    info (if any) back onto the source row via an update.

    :param callmodel: task context carrying the fetched HTML and the source row.
    :raises Exception: when no full-text container node can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    pub_year = pub_date[:4]

    # Title: page header first, then the ArticleTitle meta tag, then the
    # title captured on the list page.
    title = ''.join(res.xpath('//div[@class="article"]/h2//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields are matched by individual characters of their Chinese
    # labels so minor label variations between pages still match.
    pub_no = ''.join(res.xpath(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//th[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//th[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//th[contains(string(),"失") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//th[contains(string(),"生") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '襄阳' + organ

    # Try the article containers in order of specificity.
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zwnr"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages carry only attachments and no body text, but the container
    # node normally still exists; a missing node is abnormal.
    if not fulltext:
        raise Exception(f"no fulltext node found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99445'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XIANGYANG"
    zt_provider = "xiangyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment info on the source row so downloads can be scheduled.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省荆州市
def policy_jingzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Jingzhou (Hubei): JSON list endpoint.

    The endpoint returns up to 1000 entries in one response, so pagination is
    ignored (treated as a single page); every usable entry becomes one
    article-level task row.
    """

    def build_rawid(link: str) -> str:
        # Derive a stable rawid from the URL tail, stripping extension/query.
        parts = link.split('/')
        if len(parts) > 1:
            tail = parts[-1].replace(".shtml", "").replace(".html", "").replace(
                ".htm", "").replace("?", "").strip()
            return "{}_{}".format(parts[-2], tail)
        return parts[-1].replace(".shtml", "").replace("?", "").replace(
            ".html", "").replace(".htm", "").strip()

    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    base_url = f"http://www.jingzhou.gov.cn/{callmodel.sql_model.list_rawid}.json"
    for entry in payload["data"]:
        link = entry["url"]
        title = entry["title"]
        if link is None or 'htm' not in link:
            continue
        row = base_info.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = build_rawid(link)
        row["sub_db_id"] = '99446'
        article_json = {
            "url": parse.urljoin(base_url, link),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                entry["time"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)
    return result


def policy_jingzhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jingzhou (Hubei) HTML list pages.

    Determines the total page count from the page HTML, fans out rows for the
    remaining list pages (only while processing the first page) and emits one
    article-level task row per usable list entry.

    :param callmodel: task context carrying the fetched HTML and the source row.
    :return: DealModel with pagination rows in befor_dicts and article rows
             in next_dicts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total page count comes from an embedded "pageTotal: <n>," JS value;
        # fall back to the pager widget, then default to a single page.
        page_info = re.findall(r"pageTotal: *(\d+),", html)
        if page_info:
            total_page = int(page_info[0])
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Page N of this site uses a "<name>_N" path segment.
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="article-box jiedu-list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "jzgov-content-news-list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # No @title attribute; use the anchor text instead.
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                # url_path already queried './a[1]/@href'; nothing more to try.
                continue
            if 'htm' not in url:
                continue
            # rawid: "<parent-dir>_<basename>" with extension/query stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(
                    rawid_list[-2],
                    rawid_list[-1].replace(".shtml", "").replace(".html", "")
                    .replace(".htm", "").replace("?", "").strip())
            else:
                rawid = (rawid_list[-1].replace(".shtml", "").replace("?", "")
                         .replace(".html", "").replace(".htm", "").strip())
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99446'
            url_before = f"http://www.jingzhou.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./a[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span[1]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jingzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jingzhou; no extra processing is required."""
    return DealModel()


def policy_jingzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Jingzhou (Hubei) municipal policy article pages.

    Parses the title, metadata fields and full text from the fetched HTML
    (``callmodel.para_dicts['data']['1_1']['html']``) combined with the
    list-stage metadata in ``callmodel.sql_model.article_json``, and
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest``
    tables. Attachment info found inside the full-text node is written
    back to the source row's ``other_dicts`` column.

    Raises:
        Exception: if the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    # pub_date was captured on the list page; assumed to start with YYYY.
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//div[@class="jzgov-article-title"]/h2//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Metadata cells are located by the characters of their Chinese labels
    # (robust against whitespace/markup inside the label); the value is the
    # text of the immediately following <td>.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    # A bare "市..." organ refers to the city itself; prefix the city name.
    if organ.startswith('市'):
        organ = '荆州' + organ

    fulltext_xpath = '//div[@class="jzgov-article-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages carry only downloadable attachments; a missing full-text
    # node is treated as a hard failure so the record is not half-saved.
    if not fulltext:
        raise Exception(f"fulltext node not found: {fulltext_xpath}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99446'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINGZHOU"
    zt_provider = "jingzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links from inside the full-text node and persist
    # them on the source row so the download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省十堰市
def policy_shiyanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shiyan (Hubei) JSON list endpoints.

    The endpoint returns up to 1000 entries in a single JSON payload, so
    no paging fan-out is needed here; each entry becomes an article task
    for the next stage. (For sources that exceed 1000 entries use the
    HTML-paging variant, ``policy_shiyanlist2_callback``.)

    Robustness: entries whose ``DOCTITLE`` is null no longer abort the
    whole batch with an AttributeError — the title falls back to "" and
    the article ETL stage re-extracts it from the page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]

        for li in li_list:
            temp = info_dicts.copy()
            # Rows for the next stage carry the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            # Guard against null titles in the feed.
            title = li["DOCTITLE"] or ""
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without suffix>" so ids stay
            # unique across sub-directories.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()

            temp["rawid"] = rawid
            temp["sub_db_id"] = '99448'
            url_before = f"http://www.shiyan.gov.cn/{callmodel.sql_model.list_rawid}.json"
            article_json["url"] = parse.urljoin(url_before, url)
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["DocRelTime"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_shiyanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shiyan (Hubei) HTML list pages.

    On page 0 it determines the page count — from ``createPageHTML(...)``
    in the page script, or from the pager links as a fallback — and
    schedules the remaining page tasks; it then extracts every article
    link on the current page into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Raw string: \( and \d are regex escapes, not string escapes.
        page_info = re.findall(r"createPageHTML\(\"(\d+)\",", para_dicts["data"]["1_1"]['html'])

        if page_info:
            total_page = int(page_info[0])
        else:
            # Fallback: the second-to-last pager link holds the page count.
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info)
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the base page fans out the remaining page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            # Pages 1..total_page-1: page 0 is the unsuffixed base page, so
            # the "_<n>" suffixed files only go up to total_page - 1
            # (assumed from the site's file naming — confirm if extending).
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Two list layouts are in use on the site; try the card layout
        # first, then the plain list-group layout.
        li_list = res.xpath('//div[@class="row"]//div[@class="card bg-light my-3"]')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "list-group")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title|./a[1]/@alt'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Rows for the next stage carry the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()|.//h5/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without suffix>".
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm",
                                                                                                         "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".shtml", "").replace("?", "").replace(".html", "").replace(".htm",
                                                                                                           "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99448'
            url_before = f"http://{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[1]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//div[@class='card-footer text-right']/small/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shiyanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shiyan: no follow-up scheduling is
    needed, so an empty DealModel is returned (parsing happens in the
    ETL callback)."""
    return DealModel()


def policy_shiyanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shiyan (Hubei) municipal policy article pages.

    Parses the title, metadata fields and full text from the fetched HTML
    (``callmodel.para_dicts['data']['1_1']['html']``) combined with the
    list-stage metadata in ``callmodel.sql_model.article_json``, and
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest``
    tables. Attachment info found inside the full-text node is written
    back to the source row's ``other_dicts`` column.

    Raises:
        Exception: if the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # The list page had no date; read it from the metadata block.
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    # pub_date is assumed to start with YYYY after cleaning.
    pub_year = pub_date[:4]

    title = ''.join(res.xpath('//div[@class="doc-title"]/h2//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Metadata cells are located by the characters of their Chinese labels
    # (robust against whitespace/markup inside the label); the value is the
    # text of the immediately following <div>.
    pub_no = ''.join(res.xpath(
        '//div[contains(string(),"文") and contains(string(),"号")]/following-sibling::div[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(string(),"索") and contains(string(),"引")]/following-sibling::div[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[contains(string(),"主") and contains(string(),"词")]/following-sibling::div[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::div[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::div[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//div[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::div[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//div[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::div[1]/text()|//div[contains(string(),"发") and contains(string(),"文") and contains(string(),"单")]/following-sibling::div[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    # A bare "市..." organ refers to the city itself; prefix the city name.
    if organ.startswith('市'):
        organ = '十堰' + organ

    fulltext_xpath = '//div[@class="doc-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages carry only downloadable attachments; a missing full-text
    # node is treated as a hard failure so the record is not half-saved.
    if not fulltext:
        raise Exception(f"fulltext node not found: {fulltext_xpath}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99448'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHIYAN"
    zt_provider = "shiyangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links from inside the full-text node and persist
    # them on the source row so the download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省孝感市
def policy_xiaoganlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xiaogan (Hubei) "news-list" style pages.

    On page 1 it derives the page count from the "共N条" total (15 items
    per page), or from the pager links as a fallback, and schedules the
    remaining page tasks; it then extracts every article link on the
    current page into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Raw string: \d is a regex escape, not a string escape.
        page_info = re.findall(r'.*共(\d+)条.*', para_dicts["data"]["1_1"]['html'])

        if page_info:
            max_counts = page_info[0]
            # 15 entries per list page (site layout).
            total_page = math.ceil(int(max_counts) / 15)
        else:
            # Fallback: the second-to-last pager link holds the page count.
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info)
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining page tasks
            # (pages here are 1-based).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class, "news-list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Rows for the next stage carry the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without suffix>"; note the
            # extra .jhtml suffix used by this site.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace(".html",
                                                                                                          "").replace(
                                           ".htm",
                                           "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace("?", "").replace(".html",
                                                                                                            "").replace(
                    ".htm",
                    "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99449'
            url_before = "http://gkml.xiaogan.gov.cn/c/www/gfxwj.jhtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[1]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//div[@class='card-footer text-right']/small/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xiaoganlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xiaogan (Hubei) plain "list" style pages.

    On page 1 it derives the page count from the "共N条" total (15 items
    per page), or from the pager links as a fallback, and schedules the
    remaining page tasks; it then extracts every article link on the
    current page into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Raw string: \d is a regex escape, not a string escape.
        page_info = re.findall(r'.*共(\d+)条.*', para_dicts["data"]["1_1"]['html'])

        if page_info:
            max_counts = page_info[0]
            # 15 entries per list page (site layout).
            total_page = math.ceil(int(max_counts) / 15)
        else:
            # Fallback: the second-to-last pager link holds the page count.
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info)
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining page tasks
            # (pages here are 1-based).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class, "list")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Rows for the next stage carry the *next* task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()|./span/a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./span/a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without suffix>"; note the
            # extra .jhtml suffix used by this site.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace(".html",
                                                                                                          "").replace(
                                           ".htm",
                                           "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace("?", "").replace(".html",
                                                                                                            "").replace(
                    ".htm",
                    "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99449'
            url_before = f"http://{callmodel.sql_model.list_rawid}/{list_json['page_info']}_{callmodel.sql_model.page_index}.jhtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[1]/span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span[1]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xiaoganarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xiaogan: the fetch itself is the whole job,
    so nothing further is scheduled and an empty DealModel is returned."""
    return DealModel()


def policy_xiaoganarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Xiaogan government policy article page.

    Parses title, publish date and policy metadata (document number, index
    number, keywords, issuing organ, legal status, dates) out of the fetched
    HTML, emits rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and writes any attachment info back onto the crawl record.

    Expects ``callmodel.para_dicts['data']['1_1']['html']`` to hold the page
    HTML and ``callmodel.sql_model.article_json`` the list-stage metadata
    (url / title / pub_date).

    Raises a bare ``Exception`` when no fulltext node is found, which fails
    the record for retry.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    # Fall back to scraping the publish date from the page when the list
    # stage did not capture one.
    if not pub_date:
        # pub_date = ''.join(res.xpath(
        # '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    # clean_pubdate presumably yields "YYYY..." so the first 4 chars are the
    # year; empty pub_date gives an empty pub_year.
    pub_year = pub_date[:4]

    # Title: page layouts vary, try each known node then fall back to the
    # title captured at list stage.
    title = ''.join(res.xpath('//div[@class="news-title"]/h2//text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="article-title"]/h1/text()').extract()).strip()
    #     title = ''.join(res.xpath(
    #         '//div[@id="effect2"]//td[contains(string(),"标") and contains(string(),"题")]/following-sibling::td[1]//text()').extract()).strip()
    # if not title:
    #     title = ''.join(res.xpath('//p[@class="gz_title"]//text()').extract()).strip()

    # else:
    #     fulltext_xpath = '//div[@id="zoom"]'
    if not title:
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Metadata table: each field is located by matching the label cell on
    # individual characters (robust against whitespace inside the label)
    # and reading the following <td>.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    # Qualify a bare "市..." organ with the city name (e.g. 市人民政府 -> 孝感市人民政府).
    if organ.startswith('市'):
        organ = '孝感' + organ

    # Fulltext: primary container, then the <article> fallback.
    fulltext_xpath = '//div[@id="content_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//article'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # if not fulltext:
    # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
    # fulltext_xpath = '//div[@class="article"]'
    # fulltext_xpath = '//ucapcontent'

    # fulltext_xpath2 = '//div[@class="is-downlist"]'
    # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    # fulltext = res.xpath(fulltext_xpath).extract_first()
    # NOTE: some pages on this site have no body text (downloads only) even
    # though the container node still exists.
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99449'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XIAOGAN"
    zt_provider = "xiaogangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath = '//div[contains(@class, "left_conter_20190901")]'
    # fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    # Attachment links found inside the fulltext node are written back onto
    # the crawl record's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 湖北省荆门市 (Jingmen City, Hubei Province)
def policy_jingmenlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jingmen (www.jingmen.gov.cn) policy documents.

    The list endpoint returns an XML-like feed (<totalpage>, <recordset>).
    On the first page the remaining list pages are fanned out as new tasks;
    every page inserts one article-stage task per <record> carrying rawid,
    url, title and pub_date in article_json.

    NOTE(review): byte-identical to policy_jingmenlist2_callback —
    presumably duplicated so each task_name/tag routes independently;
    confirm before consolidating.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count: prefer the <totalpage> node, else the pager links.
        page_info = res.xpath('//totalpage/text()').extract_first()

        if page_info:
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only the first page schedules the remaining list pages.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//ul[contains(@class, "article-box jiedu-list")]/li')
        # li_list = res.xpath('//div[@class="row"]//div[@class="card bg-light my-3"]')
        # if not li_list:
        #     li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/li/h4')
        # if not li_list:
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: the inserted row belongs to the
            # article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            # Skip non-article links (anything without an htm/html extension).
            if 'htm' not in url:
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # rawid: last one or two path segments with extensions and the
            # query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace(".html",
                                                                                                          "").replace(
                                           ".htm",
                                           "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace("?", "").replace(".html",
                                                                                                            "").replace(
                    ".htm",
                    "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99450'
            # url_before = f"http://{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            url_before = "http://www.jingmen.gov.cn"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[1]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//div[@class='card-footer text-right']/small/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jingmenlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Second list-page callback for Jingmen (www.jingmen.gov.cn) policies.

    Parses the XML-like list feed (<totalpage>, <recordset>), fans out the
    remaining list pages on page 1, and inserts one article-stage task per
    <record> with rawid, url, title and pub_date in article_json.

    NOTE(review): byte-identical to policy_jingmenlist1_callback —
    presumably duplicated so each task_name/tag routes independently;
    confirm before consolidating.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count: prefer the <totalpage> node, else the pager links.
        page_info = res.xpath('//totalpage/text()').extract_first()

        if page_info:
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            # page_info = re.findall('createPageHTML.*?\(\"(\d+)\",.*', para_dicts["data"]["1_1"]['html'])
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            # page_info = re.sub(".*/(\d+)页", "\\1", page_info_before)
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only the first page schedules the remaining list pages.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # li_list = res.xpath('//ul[contains(@class, "article-box jiedu-list")]/li')
        # li_list = res.xpath('//div[@class="row"]//div[@class="card bg-light my-3"]')
        # if not li_list:
        #     li_list = res.xpath('//div[@class="xxgk-list newsList listContent"]/li/h4')
        # if not li_list:
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: the inserted row belongs to the
            # article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            # Skip non-article links (anything without an htm/html extension).
            if 'htm' not in url:
                continue
            # list_rawid_alt = list_json["list_rawid_alt"]

            # rawid: last one or two path segments with extensions and the
            # query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace(".html",
                                                                                                          "").replace(
                                           ".htm",
                                           "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace("?", "").replace(".html",
                                                                                                            "").replace(
                    ".htm",
                    "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99450'
            # url_before = f"http://{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            url_before = "http://www.jingmen.gov.cn"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[1]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//div[@class='card-footer text-right']/small/text()").get()
            # pub_date_before_1 = li.xpath("./a/div[@class='write_blo'ck_left']/span[2]/text()").get()
            # pub_date_before_2 = li.xpath("./a/div[@class='write_block_left']/span[1]/text()").get()
            # pub_date_before = pub_date_before_1 + pub_date_before_2
            # if not pub_date_before:
            #     pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jingmenarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jingmen: the fetch itself is the whole job,
    so nothing further is scheduled and an empty DealModel is returned."""
    return DealModel()


def policy_jingmenarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Jingmen government policy article page.

    Parses title, publish date and policy metadata out of the fetched HTML,
    emits rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and writes any attachment info back onto the crawl record.

    Expects ``callmodel.para_dicts['data']['1_1']['html']`` to hold the page
    HTML and ``callmodel.sql_model.article_json`` the list-stage metadata.

    Raises a bare ``Exception`` when no publish date or no fulltext node is
    found, which fails the record for retry.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    # Fall back to scraping the publish date when the list stage did not
    # capture one.
    if not pub_date:
        # pub_date = ''.join(res.xpath(
        # '//div[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()|//table[@id="effect2"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # NOTE(review): this joins the text of every <p> under the content div —
    # verify against live pages that it really yields just the title and not
    # the whole body.
    title = ''.join(res.xpath('//div[@class="con zoom"]/p//text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="content_body_tit"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Last-resort publish date: the labelled span on the page.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception

    # Metadata table: locate each label cell by its individual characters
    # (robust against whitespace inside the label) and read the next <td>.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    # Qualify a bare "市..." organ with the city name (e.g. 市人民政府 -> 荆门市人民政府).
    if organ.startswith('市'):
        organ = '荆门' + organ

    # Fulltext: primary container, then the #content fallback.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # if not fulltext:
    # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
    # fulltext_xpath = '//div[@class="article"]'
    # fulltext_xpath = '//ucapcontent'

    # fulltext_xpath2 = '//div[@class="is-downlist"]'
    # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    # fulltext = res.xpath(fulltext_xpath).extract_first()
    # NOTE: some pages on this site have no body text (downloads only) even
    # though the container node still exists.
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99450'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINGMEN"
    zt_provider = "jingmengovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath = '//div[contains(@class, "left_conter_20190901")]'
    # fulltext_xpath2 = '//div[contains(@class, "fjdown")]'
    # Attachment links found inside the fulltext node are written back onto
    # the crawl record's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 湖北省黄冈市 (Huanggang City, Hubei Province)
def policy_hglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huanggang (www.hg.gov.cn) policy documents.

    Parses the XML-like list feed, fans out the remaining list pages on
    page 1, and inserts one article-stage task per table row inside each
    <record> (rawid, url, title, pub_date in article_json).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info = res.xpath('//totalpage/text()').extract_first()

        if page_info:
            max_count = int(page_info) if page_info else 1
            # NOTE(review): <totalpage> is divided by 3 here (unlike the
            # sibling callbacks) — presumably the feed paginates 3x what one
            # request fetches; confirm against the live feed.
            total_page = math.ceil(int(max_count)/3)
        else:
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                max_count = int(page_info) if page_info else 1
                total_page = max_count
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only the first page schedules the remaining list pages.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//recordset/record//table//tr')
        url_path = './td[1]/a/@href'
        title_path = './td[1]/a/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: the inserted row belongs to the
            # article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./td[1]/a/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            # Skip non-article links (anything without an htm/html extension).
            if 'htm' not in url:
                continue
            # rawid: last one or two path segments with extensions and the
            # query marker stripped.
            rawid_list = url.split('/')
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2],
                                       rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace(".html",
                                                                                                          "").replace(
                                           ".htm",
                                           "").replace(
                                           "?", "").strip())
            else:
                rawid = rawid_list[-1].replace(".jhtml", "").replace(".shtml", "").replace("?", "").replace(".html",
                                                                                                            "").replace(
                    ".htm",
                    "").strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99452'
            url_before = "http://www.hg.gov.cn/"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[2]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//div[@class='card-footer text-right']/small/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Huanggang (hg.gov.cn) open-government column.

    On the first page, reads the JS pager's ``pageCount`` out of the raw HTML
    and schedules the remaining list pages; then inserts one article-stage
    task per ``<tr class="xxgk_nav_con">`` row, carrying rawid, url, title
    and pub_date in article_json.

    :param callmodel: crawl callback context; the fetched HTML is under
        ``para_dicts['data']['1_1']['html']`` and the current list task in
        ``sql_model``.
    :return: DealModel with page-fanout inserts (befor_dicts) and
        article-task inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Raw string so \d is a regex escape, not an (invalid) string
            # escape that warns on Python 3.12+.
            max_count = re.findall(r"pageCount:(\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Only needed if the commented page_info variant below is restored.
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): the sibling list callbacks iterate to
            # total_page + 1; this one stops at total_page and so never
            # schedules the last page — confirm whether that is intentional
            # before changing.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": f"{list_json['page_info']}_{page}"}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//tr[@class="xxgk_nav_con"]')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the next-stage tag: the inserted row belongs to the
            # article stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[@class="info"]/a[1]/@href').extract_first()
            # Guard: rows without a link would crash urljoin(None).
            if not href:
                continue
            base_url = 'https://www.hg.gov.cn/zwgk/public/column/6636765'
            # base_url = f'http://www.bjchp.gov.cn'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            # Filename without extension is the stable rawid for this site.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            # rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99452'
            article_json["url"] = url
            # Guard missing @title instead of crashing on None.strip();
            # fall back to the link text like the sibling callbacks do.
            title = li.xpath('td[@class="info"]/a[1]/@title').extract_first()
            if not title:
                title = li.xpath('td[@class="info"]/a[1]/text()').extract_first()
            article_json["title"] = title.strip() if title else ""
            # Store "" rather than JSON null when the date cell is absent so
            # the ETL stage's `if not pub_date` fallback works on a string.
            pub_date = li.xpath('td[@class="fbrq"]/text()').extract_first()
            article_json["pub_date"] = pub_date if pub_date else ""
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huanggang: the fetch itself is the whole
    job, so nothing further is scheduled and an empty DealModel is returned."""
    return DealModel()


def policy_hgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Huanggang (黄冈) city government policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, dates,
    document numbers, issuing organ, ...) and the fulltext node, then queues
    rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables and an
    attachment-info (``other_dicts``) update on the source record.

    Raises:
        Exception: if no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    res = Selector(text=html)
    if not pub_date:
        # Fall back to the publication date printed on the article page.
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Known title containers, tried in order of preference.
    title = ''.join(res.xpath('//p[@class="con-title1"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="wztit"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//td[@id="title"]/text()').extract()).strip()

    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()
    if 'class="table_suoyinbox"' in html:
        # Metadata layout variant 1: "table_suoyin" index table.
        pub_no = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"文号")]/following::td[1]//text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"索")]/following::td[1]//text()').extract()).strip()
        keyword = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"关")]/following::td[1]//text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"主题分类")]/following::td[1]//text()').extract()).strip()
        written_date = ''
        legal_status = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"有")]/following::td[1]//text()').extract()).strip()
        invalid_date = ''
        impl_date = ''
        organ = ''.join(res.xpath('//table[@class="table_suoyin"]//th[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    else:
        # Metadata layout variant 2: id="pc" table or "xxgk_wz_top" table.
        pub_no = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"文") and contains(string(),"号")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"索") and contains(string(),"引")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
        keyword = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"主") and contains(string(),"词")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

        written_date = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

        legal_status = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"有") and contains(string(),"效") and contains(string(),"性")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"有") and contains(string(),"效") and contains(string(),"性")]/following-sibling::td[1]/text()').extract()).strip()
        invalid_date = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
        impl_date = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath(
            '//table[@id="pc"]//b[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/../text()|//table[@class="xxgk_wz_top"]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Prefix the city name when the organ is a bare "市..." form.
        organ = '黄冈' + organ

    # Known fulltext containers, tried in order of preference.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class,"wzcon ")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="main-txt1"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="fontZoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="main"]/form/table[2]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="TRS_Editor"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//td[@class="bt_content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()

    # Some pages on this site have no body text (attachment-download only),
    # so the fulltext node can be entirely absent; abort this record then.
    if not fulltext:
        raise Exception("policy_hgarticle_etl_callback: no fulltext node found for %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99452'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HG"
    zt_provider = "hggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    # NOTE(review): debug print kept — presumably used as progress output.
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省咸宁市
# 湖北省咸宁市
def policy_xianninglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xianning (咸宁) JSON list endpoints.

    The endpoint returns up to 1000 records in one JSON payload, so paging is
    ignored (one page assumed). Each record becomes an article-stage task row.

    Bug fixed: list items are plain dicts decoded from JSON, but the old
    ``None`` fallbacks called ``li.xpath(...)`` on them, raising
    AttributeError whenever ``url``/``DOCTITLE`` was missing or None; also
    ``li["DocRelTime"]`` could raise KeyError or yield None before
    ``.replace``. All lookups now use ``dict.get`` with safe defaults.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])

        # NOTE: this endpoint returns at most 1000 items at once; for feeds
        # that can exceed that, use the paged pattern (see *list2* callbacks).
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # ``li`` is a plain dict from the JSON payload — use .get(),
            # never .xpath() (dicts have no xpath method).
            url = li.get("url")
            title = li.get("DOCTITLE") or ""
            if not url:
                continue
            if 'htm' not in url:
                continue

            # Build a stable rawid from the last path segment(s), with the
            # file extension / query marker stripped.
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            rawid = "{}_{}".format(rawid_list[-2], tail) if len(rawid_list) > 1 else tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99453'
            url_before = f"http://www.xianning.gov.cn/{callmodel.sql_model.list_rawid}.json"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.get("DocRelTime") or ""

            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xianninglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xianning (咸宁) HTML list pages.

    On the first page (page_index == 0) the total page count is detected —
    from the ``createPageHTML(...)`` script call, falling back to the pager
    links — and follow-up list tasks are queued for the remaining pages.
    Every page's rows are then parsed into article-stage task rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Raw string for the regex — the old non-raw literal relied on '\('
        # and '\d' surviving unchanged (DeprecationWarning on modern Python).
        page_info = re.findall(r'createPageHTML.*?\((\d+),.*', para_dicts["data"]["1_1"]['html'])

        if page_info:
            total_page = int(page_info[0])
        else:
            # Fall back to the second-to-last pager link text.
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info)
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: queue one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Known row containers, tried in order.
        li_list = res.xpath('//tbody[@id="content_list"]/tr')
        if not li_list:
            li_list = res.xpath('//ul[@class="info-list"]/li')
        if not li_list:
            li_list = res.xpath('//recordset/record')
        url_path = './td[1]/a/@href'
        title_path = './td[1]/a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get()
                if title is None:
                    title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # Build a stable rawid from the last path segment(s), with the
            # file extension / query marker stripped.
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            rawid = "{}_{}".format(rawid_list[-2], tail) if len(rawid_list) > 1 else tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99453'
            url_before = f"http://www.xianning.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[4]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xianningarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xianning (咸宁): all real processing
    happens in the ETL stage, so this returns an empty DealModel."""
    return DealModel()


def policy_xianningarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Xianning (咸宁) city government policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, dates,
    document numbers, issuing organ, ...) and the fulltext node, then queues
    rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables and an
    attachment-info (``other_dicts``) update on the source record.

    Raises:
        Exception: if no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # Fall back to the publication date printed on the article page.
        pub_date = ''.join(res.xpath(
            '//b[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Known title containers, tried in order of preference.
    title = ''.join(res.xpath('//h1/text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="content_body_tit"]/text()').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()

    # Metadata fields: each label is a <b> whose value sits in the
    # following sibling <span>.
    pub_no = ''.join(res.xpath(
        '//b[contains(string(),"文") and contains(string(),"号")]/following-sibling::span[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//b[contains(string(),"索") and contains(string(),"引")]/following-sibling::span[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//b[contains(string(),"主") and contains(string(),"词")]/following-sibling::span[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::span[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::span[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//b[contains(string(),"有") and contains(string(),"效") and contains(string(),"性")]/following-sibling::span[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//b[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::span[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//b[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::span[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::span[1]/text()|//b[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::span[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Prefix the city name when the organ is a bare "市..." form.
        organ = '咸宁' + organ

    # Known fulltext containers, tried in order of preference.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="fontzoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages on this site have no body text (attachment-download only),
    # so the fulltext node can be entirely absent; abort this record then.
    if not fulltext:
        raise Exception("policy_xianningarticle_etl_callback: no fulltext node found for %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99453'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XIANNING"
    zt_provider = "xianninggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    # NOTE(review): debug print kept — presumably used as progress output.
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 湖北省随州市

def policy_suizhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Suizhou (随州) HTML list pages.

    On the first page (page_index == 0) the total page count is detected —
    from the ``createPageHTML(...)`` script call, falling back to the pager
    links — and follow-up list tasks are queued for the remaining pages.
    Every page's rows are then parsed into article-stage task rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Raw string for the regex — the old non-raw literal relied on '\('
        # and '\d' surviving unchanged (DeprecationWarning on modern Python).
        page_info = re.findall(r'createPageHTML.*?\((\d+),.*', para_dicts["data"]["1_1"]['html'])

        if page_info:
            total_page = int(page_info[0])
        else:
            # Fall back to the second-to-last pager link text.
            page_info = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            if page_info:
                total_page = int(page_info)
            else:
                total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: queue one list task per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Known row containers, tried in order.
        li_list = res.xpath('//table[@class="layui-table"]/tbody/tr')
        if not li_list:
            li_list = res.xpath('//div[@class="xxgk-list1 xxgk-list2"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="news-list"]/ul/li')
        url_path = './td[1]/a/@href'
        title_path = './td[1]/a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/div/text()').get()
                if title is None:
                    title = li.xpath('./a[1]/text()').get()
                    if title is None:
                        title = ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # Build a stable rawid from the last path segment(s), with the
            # file extension / query marker stripped.
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            rawid = "{}_{}".format(rawid_list[-2], tail) if len(rawid_list) > 1 else tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99454'
            url_before = f"http://www.suizhou.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./td[3]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span[last()]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Suizhou (随州): all real processing
    happens in the ETL stage, so this returns an empty DealModel."""
    return DealModel()


def policy_suizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Suizhou (随州) city government policy article pages.

    Parses the downloaded article HTML, extracts metadata (title, dates,
    document numbers, issuing organ, ...) and the fulltext node, then queues
    rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables and an
    attachment-info (``other_dicts``) update on the source record.

    Raises:
        Exception: if no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # Fall back to the publication date printed on the article page.
        pub_date = ''.join(res.xpath(
            '//div[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Known title containers, tried in order of preference.
    title = ''.join(res.xpath('//h1/text()').extract()).strip()

    if not title:
        title = ''.join(res.xpath('//div[@class="news-detl-title"]/text()').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()

    # Metadata fields: each label is a <div> whose value sits in the
    # following sibling <div>.
    pub_no = ''.join(res.xpath(
        '//div[contains(string(),"文") and contains(string(),"号")]/following-sibling::div[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(string(),"索") and contains(string(),"引")]/following-sibling::div[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//div[contains(string(),"主") and contains(string(),"词")]/following-sibling::div[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::div[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::div[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//div[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::div[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//div[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::div[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::div[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('市'):
        # Prefix the city name when the organ is a bare "市..." form.
        organ = '随州' + organ

    # Known fulltext containers, tried in order of preference.
    fulltext_xpath = '//div[@class="news-detl-c"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="fontzoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # Some pages on this site have no body text (attachment-download only),
    # so the fulltext node can be entirely absent; abort this record then.
    if not fulltext:
        raise Exception("policy_suizhouarticle_etl_callback: no fulltext node found for %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99454'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SUIZHOU"
    zt_provider = "suizhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    # NOTE(review): debug print kept — presumably used as progress output.
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links found inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result

# 湖北省恩施土家族苗族自治州
def policy_enshilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Enshi government policy JSON feed.

    Parses the JSON payload of a list page, derives a ``rawid`` from each
    article URL and queues one next-stage task row per article.

    :param callmodel: platform callback model carrying the fetched page
        and the originating SQL task row.
    :return: a DealModel whose ``next_dicts`` holds the article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])

        # NOTE: this endpoint returns up to 1000 records in one response,
        # so paging is ignored (single page). For larger feeds use the
        # second variant of this callback.
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["data"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            # BUGFIX: `li` is a plain dict from json.loads, so the old
            # Selector-style `li.xpath(...)` fallbacks could never work
            # (they raised AttributeError). Use dict access instead.
            url = li.get("url")
            title = li.get("DOCTITLE")
            if title is None:
                title = ""
            if url is None:
                continue
            if 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<basename without extension>"
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2], tail)
            else:
                rawid = tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99454'
            url_before = f"http://www.enshi.gov.cn/zc/zc/{callmodel.sql_model.list_rawid}.json"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            # guard against a missing/None release-time field
            pub_date_before = li.get("DocRelTime") or ""

            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_enshilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for HTML-paginated Enshi policy list pages.

    On the first page, discovers the total page count (from the
    ``createPageHTML(...)`` script call, else the pager links) and queues
    the remaining list pages. On every page, extracts the article links
    and queues one next-stage task per article.

    :param callmodel: platform callback model carrying the fetched page
        and the originating SQL task row.
    :return: a DealModel with queued page tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # FIX: the original built this Selector twice from the same HTML;
        # parse once and reuse.
        res = Selector(text=html)
        page_nums = re.findall(r'createPageHTML.*?\((\d+),.*', html)
        if page_nums:
            total_page = int(page_nums[0])
        else:
            # fall back to the second-to-last pager link text
            pager_text = res.xpath('//div[@class="jspIndex4"]/a[last()-1]/text()').extract_first()
            total_page = int(pager_text) if pager_text else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # first page: enqueue the remaining list pages
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # the list markup varies between sections; try each known layout
        li_list = res.xpath('//ul[@class="list-t border6"]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="info-list"]/li')
        if not li_list:
            li_list = res.xpath('//recordset/record')
        url_path = './a/@href'
        title_path = './a/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath('./a[1]/text()').get() or ""
            if url is None:
                url = li.xpath('./a[1]/@href').get()
                if url is None:
                    continue
            if 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<basename without extension>"
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2], tail)
            else:
                rawid = tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99455'
            url_before = f"http://www.enshi.gov.cn/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.html"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            # publish date sits in a sibling <span>, or inside the <a>
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./a/span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_enshilist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Enshi pages that embed records as a JS array.

    Extracts the ``requestData`` array literal from the page script, drops
    the embedded "doc" fragment so the literal parses cleanly, and queues
    one next-stage task per article entry.

    :param callmodel: platform callback model carrying the fetched page
        and the originating SQL task row.
    :return: a DealModel whose ``next_dicts`` holds the article tasks.
    """
    import ast  # local import: safe literal parsing (see SECURITY FIX below)

    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        datas_forward = para_dicts["data"]["1_1"]['html']
        flattened = datas_forward.replace("\n", "").replace("\t", "")
        data_1 = re.findall(r"const.*?requestData = (\[.*\]).*?var.*?app.*", flattened)
        data_2 = re.sub(r"\"doc\".*?\"dwxz\"", '"dwxz"', data_1[0])

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # SECURITY FIX: this array comes from a remote page; parse it as a
        # literal instead of eval()-ing arbitrary code from the response.
        li_list = ast.literal_eval(data_2)

        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<basename without extension>"
            rawid_list = url.split('/')
            tail = rawid_list[-1]
            for token in (".jhtml", ".shtml", ".html", ".htm", "?"):
                tail = tail.replace(token, "")
            tail = tail.strip()
            if len(rawid_list) > 1:
                rawid = "{}_{}".format(rawid_list[-2], tail)
            else:
                rawid = tail
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99455'
            url_before = "http://www.enshi.gov.cn"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date_before = li["docreltime"]
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_enshiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Enshi; no extra processing is needed,
    so an empty DealModel is returned as-is."""
    return DealModel()

def policy_enshiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Enshi policy article pages.

    Extracts policy metadata (title, dates, document/index numbers,
    subject, keywords, issuing organ, legal status) from the article
    HTML, assembles the ``policy_latest`` and ``policy_fulltext_latest``
    rows, and records attachment info found inside the full-text node.

    :param callmodel: platform callback model with the fetched article
        HTML and the originating SQL task row.
    :return: an EtlDealModel carrying the rows to save.
    :raises Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    res = Selector(text=html)
    if not pub_date:
        # fall back to the publish-date label rendered in the page header
        pub_date = ''.join(res.xpath(
            '//b[contains(string(),"发") and contains(string(),"布") and contains(string(),"日")]/following-sibling::span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # title: page <h2>, then known title containers, then the list-page title
    title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="content_body_tit"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # metadata table: each field sits in the <td> following its labelled <th>
    pub_no = ''.join(res.xpath(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//th[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//th[contains(string(),"力") and contains(string(),"效") and contains(string(),"态")]/following-sibling::td[1]/text()').extract()).strip()

    invalid_date = ''.join(res.xpath(
        '//th[contains(string(),"期") and contains(string(),"效") and contains(string(),"时")]/following-sibling::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath(
        '//th[contains(string(),"生") and contains(string(),"效") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"单") and contains(string(),"位")]/following-sibling::td[1]/text()').extract()).strip()

    organ = clean_organ(organ)
    if organ.startswith('州'):
        # expand the prefecture abbreviation to the full name
        organ = '恩施土家族苗族自治' + organ

    # full text: primary layout first, then the alternate article container
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article1-box"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    # some pages have no body text at all (download-only), yet the node
    # normally still exists; a missing node means an unexpected layout
    if not fulltext:
        raise Exception(f'no fulltext node found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99455'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ENSHI"
    zt_provider = "enshigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = clean_text(pub_no)
    data['index_no'] = clean_text(index_no)
    data['subject'] = clean_text(subject)
    data['keyword'] = clean_text(keyword)
    data['organ'] = clean_text(organ)
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = clean_text(legal_status)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment links are searched inside whichever full-text node matched
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_fgwhubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hubei Development & Reform Commission policy pages.

    Reads the metadata table (document number, index number, subject,
    issuing organ, legal status, written date) from the article HTML,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows and
    records attachment info from both the body and the appendix block.

    :param callmodel: platform callback model with the fetched article
        HTML and the originating SQL task row.
    :return: an EtlDealModel carrying the rows to save.
    :raises Exception: when the full-text node is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':
        # the site renders a literal "none" placeholder; normalize it away
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"分") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"效力状态")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # expand the provincial abbreviation to the full name
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'no fulltext node found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99433'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWHUBEI"
    zt_provider = "fgwhubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachments can live in the body or in the dedicated appendix block
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@id="appendix2"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jxthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hubei Department of Economy & IT policy pages.

    Reads the metadata table from the article HTML, builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows and records
    attachment info found inside the full-text node.

    :param callmodel: platform callback model with the fetched article
        HTML and the originating SQL task row.
    :return: an EtlDealModel carrying the rows to save.
    :raises Exception: when the full-text node is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':
        # the site renders a literal "none" placeholder; normalize it away
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="分"]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="有"]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # expand the provincial abbreviation to the full name
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'no fulltext node found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99434'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXTHUBEI"
    zt_provider = "jxthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment links are searched inside the full-text node
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_kjthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hubei Department of Science & Technology policy pages.

    Reads the metadata table from the article HTML, builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows and records
    attachment info found inside the full-text node.

    :param callmodel: platform callback model with the fetched article
        HTML and the originating SQL task row.
    :return: an EtlDealModel carrying the rows to save.
    :raises Exception: when the full-text node is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':
        # the site renders a literal "none" placeholder; normalize it away
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"分") ]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"有") and contains(text(),"性")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文时间")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # expand the provincial abbreviation to the full name
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'no fulltext node found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99435'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTHUBEI"
    zt_provider = "kjthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment links are searched inside the full-text node
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jythubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hubei Department of Education policy pages.

    Reads the metadata table from the article HTML, builds the
    ``policy_latest`` / ``policy_fulltext_latest`` rows and records
    attachment info found inside the full-text node.

    :param callmodel: platform callback model with the fetched article
        HTML and the originating SQL task row.
    :return: an EtlDealModel carrying the rows to save.
    :raises Exception: when the full-text node is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="文"]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':
        # the site renders a literal "none" placeholder; normalize it away
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="索"]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="分"]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="有"]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # expand the provincial abbreviation to the full name
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'no fulltext node found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99436'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTHUBEI"
    zt_provider = "jythubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment links are searched inside the full-text node
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_mzthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``mzthubeigovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields
    (pub_no, index_no, subject, organ, legal_status, written_date) from the
    info table, builds one record each for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and writes attachment info back into
    the task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':  # the site shows "无" ("none") for missing numbers
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"分") ]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"效力状态") ]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期") ]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # the site abbreviates provincial organs as "省..."; prefix the province
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99437'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTHUBEI"
    zt_provider = "mzthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_czthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``czthubeigovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields from
    the info table, builds records for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back into the
    task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # this site renders header cells as single characters ("文", "索", ...)
    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="文"]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':  # the site shows "无" ("none") for missing numbers
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="索"]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="分"]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="有"]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # the site abbreviates provincial organs as "省..."; prefix the province
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99438'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTHUBEI"
    zt_provider = "czthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_rsthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``rsthubeigovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields from
    the info table, builds records for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back into the
    task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="文"]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':  # the site shows "无" ("none") for missing numbers
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[text()="索"]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"主题分类")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"效力状态")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # the site abbreviates provincial organs as "省..."; prefix the province
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99439'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTHUBEI"
    zt_provider = "rsthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_zjthubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``zjthubeigovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields from
    the info table, builds records for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back into the
    task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':  # the site shows "无" ("none") for missing numbers
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"分") ]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"效力状态") ]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期") ]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # the site abbreviates provincial organs as "省..."; prefix the province
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99441'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTHUBEI"
    zt_provider = "zjthubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_wjwhubeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``wjwhubeigovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields from
    the info table, builds records for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back into the
    task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无':  # the site shows "无" ("none") for missing numbers
        pub_no = ''
    index_no = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"分") ]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"有") ]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"table")]//th[contains(text(),"发文日期") ]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # the site abbreviates provincial organs as "省..."; prefix the province
        organ = '湖北' + organ
    fulltext_xpath = '//div[@class="article-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99442'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWHUBEI"
    zt_provider = "wjwhubeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  湖北省武汉市
def policy_wuhanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Wuhan (武汉市) government policy documents.

    On the first page (``page_index == 0``) it reads the total page count
    from the ``createPageHTML(...)`` pager call in the markup and queues one
    list task per remaining page.  It then extracts every article link on
    the current page, dispatching on ``list_rawid`` because the site serves
    four different list formats (plain HTML, ``document.write`` JS, JSONP,
    and an ``_array.push`` JS payload), and queues an article task for each.

    :param callmodel: carries the fetched list page in
        ``para_dicts['data']['1_1']['html']`` and the source sql row.
    :return: ``DealModel`` with queued pagination and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # template for each queued row; task_tag_next is swapped in per item below
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # total page count from the pager JS call; default 1 if absent
            max_count = re.findall('createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # pages are 0-indexed here; page N is fetched as "index_N"
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": list_json['page_info'].replace('index', f"index_{page}")}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_rawid = callmodel.sql_model.list_rawid
        if 'ghjh' in list_rawid:
            # plain HTML list: <div class="articleList"><ul><li><a href=...>
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="articleList"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.wuhan.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid = filename without extension
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99443'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = ''.join(li.xpath('span/text()').extract()).strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zwgk/tzgg' in list_rawid:
            # notices list: fields are embedded in per-item document.write JS
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="articleList"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = ''.join(re.findall('url = "(.*?)"', li.extract()))
                base_url = f'http://www.wuhan.gov.cn/{callmodel.sql_model.list_rawid}/index.shtml'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99443'
                article_json["url"] = url
                article_json["title"] = re.findall('var title = "(.*?)"', li.extract())[0]
                # second-to-last writeln() argument holds the publish date
                article_json["pub_date"] = re.findall('writeln\("(.*?)"', li.extract())[-2]
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zcwjjsonp' in list_rawid:
            # SECURITY: eval() executes the remote JSONP payload as Python —
            # arbitrary code runs if the site is compromised.  Consider
            # json.loads/ast.literal_eval; left as-is since the payload is
            # not guaranteed to be strict JSON.
            li_list = eval(re.findall('pushInfoJsonpCallBack\((.*)\)', para_dicts["data"]["1_1"]['html'], re.S)[0].replace('\n', '').replace('\xa0', ''))
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['url']
                base_url = f'http://www.wuhan.gov.cn/{callmodel.sql_model.list_rawid}/'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99443'
                article_json["url"] = url
                article_json["title"] = li['DOCTITLE']
                article_json["pub_date"] = li['DocRelTime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # JS payload: either repeated _array.push({...}) calls or one
            # "var _array = [...]" literal; the two use different field names
            if '_array.push(' in para_dicts["data"]["1_1"]['html']:
                li_list = re.findall('_array\.push\((.*?)\);', para_dicts["data"]["1_1"]['html'], re.S)
            else:
                text_str = re.findall('var _array = \[(.*?)\];', para_dicts["data"]["1_1"]['html'], re.S)[0]
                li_list = re.findall('\{.*?\}', text_str)
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = re.findall('DOCPUBURL: *"(.*?)"', li)[0]
                base_url = f'http://www.wuhan.gov.cn/{callmodel.sql_model.list_rawid}/'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99443'
                article_json["url"] = url
                if '_array.push(' in para_dicts["data"]["1_1"]['html']:
                    article_json["title"] = re.findall('title: "(.*?)"', li)[0]
                    article_json["pub_date"] = re.findall('PUBDATE: "(.*?)"', li)[0]
                else:
                    article_json["title"] = re.findall('FileName:"(.*?)"', li)[0]
                    article_json["pub_date"] = re.findall('PUBDATE:"(.*?)"', li)[0]
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wuhanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Wuhan policy documents.

    No parse-stage work is required here — all extraction happens in the
    ETL callback — so an empty ``DealModel`` is returned.
    """
    return DealModel()


def policy_wuhanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``wuhangovpolicy`` source.

    Parses the downloaded article page, extracts the metadata fields from
    the ``articleInfo`` list (each item is "标签：value"), builds records for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back into the task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"发文字号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"主题分类：")]/text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"成文日期：")]/text()').extract()).strip()
    # strip the "成文日期：" label like every other field (was missing, so the
    # label prefix leaked into the value passed to clean_pubdate below)
    written_date = written_date.split('：')[-1].strip()
    legal_status = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"有效性：")]/text()').extract()).strip()
    legal_status = legal_status.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="articleInfo"]//li[contains(text(),"发文机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # the site abbreviates municipal organs as "市..."; prefix the city
        organ = '武汉' + organ

    fulltext_xpath = '//div[@class="article no_border"]|//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99443'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WUHAN"
    zt_provider = "wuhangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  湖北省宜昌市
def policy_yichanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yichang (宜昌市) government policy documents.

    On the first page (``page_index == 1``) it reads the total record count
    from the pager markup ('<a class="a1">N条</a>'), derives the page count
    at 15 records per page and queues one list task per remaining page.  It
    then extracts every article link on the current page and queues an
    article task for each.

    :param callmodel: carries the fetched list page in
        ``para_dicts['data']['1_1']['html']`` and the source sql row.
    :return: ``DealModel`` with queued pagination and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # template for each queued row; task_tag_next is swapped in per item below
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # total record count from the pager; default to a single page
            max_count = re.findall(r'<a class="a1">(\d+)条</a>', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(int(max_count) / 15)  # 15 items per page
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="txtlist-con"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            base_url = f'http://www.yichang.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            # skip non-article links and dynamic pages addressed by query id
            if 'htm' not in url or '&id=' in url:
                continue
            # rawid = filename without extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99447'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yichangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Yichang policy documents.

    No parse-stage work is required here — all extraction happens in the
    ETL callback — so an empty ``DealModel`` is returned.
    """
    return DealModel()


def policy_yichangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for articles from the ``yichanggovpolicy`` source.

    Extracts the title (falling back to the list-stage title) and fulltext
    from the downloaded page, then builds records for ``policy_latest`` and
    ``policy_fulltext_latest`` and writes attachment info back into the
    task row's ``other_dicts`` column.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json`` (keys: ``title``, ``url``, ``pub_date``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # prefer the on-page heading; fall back to the title captured at list stage
    title = ''.join(res.xpath('//div[@class="txtcontent-div"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="txtcontent-div"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fail loudly with context (was a bare ``raise Exception``)
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99447'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YICHANG"
    zt_provider = "yichanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was already cleaned above; clean_pubdate is presumably
    # idempotent — kept as-is to preserve existing behavior
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # persist attachment info on the source row so later stages can fetch files
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_yichanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yichang gov JSON API.

    On the first page, derives the total page count from the response and
    queues the remaining list pages. For every page, extracts per-article
    metadata and queues article-stage tasks. Two API shapes are handled,
    selected by ``&cid=560`` in ``list_rawid``.

    :param callmodel: callback model with the raw JSON body in
        ``para_dicts['data']['1_1']['html']``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # The two API variants report the total differently.
            max_count = re.findall(r'"count":(\d+)', para_dicts["data"]["1_1"]['html'])
            if not max_count:
                max_count = re.findall(r'"allnums":"(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(int(max_count) / 20)  # 20 items per page
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_rawid = callmodel.sql_model.list_rawid
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        if '&cid=560' in list_rawid:
            # xxgkapi "show" variant: items under top-level 'lists'.
            li_list = html_json['lists']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['n_id']
                url = f'http://www.yichang.gov.cn/zfxxgk/show.html?aid=1&id={href}&depid=846'
                art_url = f'https://xxgkapi.yichang.gov.cn/show/detail?id={href}'
                temp["rawid"] = href
                temp["sub_db_id"] = '99447'
                article_json["url"] = url
                article_json["art_url"] = art_url
                article_json["title"] = li['title']
                article_json["pub_date"] = li['vc_inputtime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # "governmentdoc" variant: items under data.lists, different keys.
            li_list = html_json['data']['lists']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['id']
                url = f'http://xxgk.yichang.gov.cn/show.html?aid=1&id={href}&depid=847&t=4'
                art_url = f'https://xxgkapi.yichang.gov.cn/other/governmentdoc?id={href}'
                temp["rawid"] = href
                temp["sub_db_id"] = '99447'
                article_json["url"] = url
                article_json["art_url"] = art_url
                article_json["title"] = li['filename']
                article_json["pub_date"] = li['docreltime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yichangarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yichang (variant 1): no extra scheduling needed."""
    return DealModel()


def policy_yichangarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yichang gov policy articles fetched from the JSON API.

    Handles the two response shapes ("dochtmlcon" documents vs. plain list
    items), extracts bibliographic fields and full text, and emits rows for
    ``policy_latest`` and ``policy_fulltext_latest`` plus attachment info.

    :param callmodel: callback model with the raw JSON body in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    """
    result = EtlDealModel()
    save_data = list()

    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    # Normalise the publish date once; pub_year comes from the cleaned form.
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])

    if '"dochtmlcon"' in callmodel.para_dicts['data']['1_1']['html']:
        # "governmentdoc" shape: fields nested under 'data'.
        html = html_json['data']['dochtmlcon']
        pub_no = html_json['data']['filenum']
        index_no = html_json['data']['idxid']
        subject = html_json['data']['subjectclass']
        impl_date = html_json['data']['efectdate']
        legal_status = html_json['data']['status']
        legal_status = '有效' if legal_status == '3' else ''
        organ = html_json['data']['publisher']
    else:
        # "show/detail" shape: a list with one item, different key names.
        html = html_json[0]['content']
        pub_no = html_json[0]['vc_fielid']
        index_no = html_json[0]['vc_number']
        subject = html_json[0]['vc_ztfl']
        impl_date = ''
        legal_status = html_json[0]['status']
        legal_status = '有效' if legal_status == '3' else ''
        organ = html_json[0]['vc_department']
    # Source often abbreviates the issuing organ, e.g. "市..." -> "宜昌市...".
    if organ.startswith('市'):
        organ = '宜昌' + organ
    if not html:
        html = ''
    res = Selector(text=html)
    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99447'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YICHANG"
    zt_provider = "yichanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # The full text is a bare HTML fragment, so scan the whole body for files.
    file_info = get_file_info(data, res, '(//body)')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   湖北省鄂州市
def policy_ezhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = re.findall('createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'mlxc/gggs' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[@class="list"]/ul/li|//div[@class="listtext_listpage"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.ezhou.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99451'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ezhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Ezhou gov JSON API (normative documents).

    Extracts items from ``data.list`` of the JSON response and queues one
    article-stage task per document.

    :param callmodel: callback model with the raw JSON body in
        ``para_dicts['data']['1_1']['html']``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = html_json['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['docpuburl']
            base_url = f'http://www.ezhou.gov.cn/gk/zc/gfxwj/'
            url = parse.urljoin(base_url, href)
            # Skip anything that is not an article page (e.g. attachments).
            if 'htm' not in url:
                continue
            # rawid is the filename stem, e.g. ".../t12345.htm" -> "t12345".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99451'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['pubdate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ezhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ezhou: no extra scheduling needed."""
    return DealModel()


def policy_ezhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ezhou gov policy article pages.

    Parses the article HTML, pulls bibliographic fields from the
    ``xxgk_attr`` metadata table, and emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus attachment info.

    :param callmodel: callback model with the HTML body in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located, so the
        task fails instead of silently saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Normalise the publish date once; pub_year comes from the cleaned form.
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="xqym-title"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    pub_no = '' if pub_no == '无' else pub_no  # "无" means "none"
    index_no = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"信息分类")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"发布日期")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"有效性")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxgk_attr"]//td[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    # Source often abbreviates the issuing organ, e.g. "市..." -> "鄂州市...".
    if organ.startswith('市'):
        organ = '鄂州' + organ

    fulltext_xpath = '//div[contains(@class,"xqym-p")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99451'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EZHOU"
    zt_provider = "ezhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment discovery is scoped to the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   湖南省住房和城乡建设厅
def policy_zjthunanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hunan Housing & Urban-Rural Development
    Department (湖南省住房和城乡建设厅) JSON API.

    On page 1, computes the total page count from ``data.total`` /
    ``data.rows`` and queues the remaining pages. For every page, extracts
    per-article metadata from ``data.results`` and queues article-stage tasks.

    :param callmodel: callback model with the raw JSON body in
        ``para_dicts['data']['1_1']['html']``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            max_count = html_json['data']['total']
            rows = html_json['data']['rows']
            total_page = math.ceil(max_count / rows)
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = html_json['data']['results']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = f'https://zjt.hunan.gov.cn/zjt/xxgk/xinxigongkaimulu/tzgg/index.html'
            url = parse.urljoin(base_url, href)
            # Skip anything that is not an article page (e.g. attachments).
            if 'htm' not in url:
                continue
            # rawid is the filename stem, e.g. ".../t12345.htm" -> "t12345".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99464'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['publishedTimeStr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjthunanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hunan ZJT: no extra scheduling needed."""
    return DealModel()


def policy_zjthunanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hunan ZJT policy article pages.

    Parses the article HTML, pulls bibliographic fields from the
    ``xxgk_top_frame`` metadata list, and emits rows for ``policy_latest``
    and ``policy_fulltext_latest`` plus attachment info.

    :param callmodel: callback model with the HTML body in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located, so the
        task fails instead of silently saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    # Normalise the publish date once; pub_year comes from the cleaned form.
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)

    # Metadata items are "label：value" strings; keep only the value part.
    index_no = ''.join(res.xpath('//div[@class="xxgk_top_frame"]//li[contains(text(),"索引号：")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="xxgk_top_frame"]//li[contains(text(),"主题分类：")]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="xxgk_top_frame"]//li[contains(text(),"主题词：")]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxgk_top_frame"]//li[contains(text(),"发布机构：")]//text()').extract()).strip()
    # Source often abbreviates the issuing organ, e.g. "省..." -> "湖南省...".
    if organ.startswith('省'):
        organ = '湖南' + organ

    fulltext_xpath = '//div[@id="j-show-body"]|//div[@class="main_con_zw"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99464'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTHUNAN"
    zt_provider = "zjthunangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment discovery is scoped to the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


