import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

__all__ = [
    "policy_fzggwahlist1_callback",
    "policy_fzggwahlist2_callback",
    "policy_fzggwaharticle_callback",
    "policy_fzggwaharticle_etl_callback",
    "policy_jxahlist1_callback",
    "policy_jxahlist2_callback",
    "policy_jxaharticle_callback",
    "policy_jxaharticle_etl_callback",
    "policy_kjtahlist1_callback",
    "policy_kjtahlist2_callback",
    "policy_kjtahlist3_callback",
    "policy_kjtaharticle_callback",
    "policy_kjtaharticle_etl_callback",
    "policy_jytahlist1_callback",
    "policy_jytahlist2_callback",
    "policy_jytahlist3_callback",
    "policy_jytaharticle_callback",
    "policy_jytaharticle_etl_callback",
    "policy_mzahlist1_callback",
    "policy_mzahlist2_callback",
    "policy_mzaharticle_callback",
    "policy_mzaharticle_etl_callback",
    "policy_cztahlist1_callback",
    "policy_cztahlist2_callback",
    "policy_cztaharticle_callback",
    "policy_cztaharticle_etl_callback",
    "policy_hrssahlist1_callback",
    "policy_hrssahlist2_callback",
    "policy_hrssahlist3_callback",
    "policy_hrssaharticle_callback",
    "policy_hrssaharticle_etl_callback",
    "policy_nyncahlist1_callback",
    "policy_nyncahlist2_callback",
    "policy_nyncahlist3_callback",
    "policy_nyncaharticle_callback",
    "policy_nyncaharticle_etl_callback",
    "policy_dohurdahlist1_callback",
    "policy_dohurdahlist2_callback",
    "policy_dohurdaharticle_callback",
    "policy_dohurdaharticle_etl_callback",
    "policy_wjwahlist1_callback",
    "policy_wjwahlist2_callback",
    "policy_wjwahlist3_callback",
    "policy_wjwaharticle_callback",
    "policy_wjwaharticle_etl_callback",
    "policy_huainanlist1_callback",
    "policy_huainanlist2_callback",
    "policy_huainanlist3_callback",
    "policy_huainanarticle_callback",
    "policy_huainanarticle_etl_callback",
    "policy_maslist1_callback",
    "policy_maslist2_callback",
    "policy_maslist3_callback",
    "policy_maslist4_callback",
    "policy_masarticle_callback",
    "policy_masarticle_etl_callback",
    "policy_huaibeilist1_callback",
    "policy_huaibeilist2_callback",
    "policy_huaibeilist3_callback",
    "policy_huaibeiarticle_callback",
    "policy_huaibeiarticle_etl_callback",
    "policy_anqinglist1_callback",
    "policy_anqinglist2_callback",
    "policy_anqinglist3_callback",
    "policy_anqingarticle_callback",
    "policy_anqingarticle_etl_callback",
    "policy_huangshanlist1_callback",
    "policy_huangshanlist2_callback",
    "policy_huangshanlist3_callback",
    "policy_huangshanlist4_callback",
    "policy_huangshanlist5_callback",
    "policy_huangshanarticle_callback",
    "policy_huangshanarticle_etl_callback",
    "policy_fylist1_callback",
    "policy_fylist2_callback",
    "policy_fyarticle_callback",
    "policy_fyarticle_etl_callback",
    "policy_ahszlist1_callback",
    "policy_ahszlist2_callback",
    "policy_ahszlist3_callback",
    "policy_ahszarticle_callback",
    "policy_ahszarticle_etl_callback",
    "policy_chuzhoulist1_callback",
    "policy_chuzhoulist2_callback",
    "policy_chuzhoulist3_callback",
    "policy_chuzhouarticle_callback",
    "policy_chuzhouarticle_etl_callback",
    "policy_luanlist_callback",
    "policy_luanarticle_callback",
    "policy_luanarticle_etl_callback",
    "policy_xuanchenglist1_callback",
    "policy_xuanchenglist2_callback",
    "policy_xuanchengarticle_callback",
    "policy_xuanchengarticle_etl_callback",
    "policy_chizhoulist1_callback",
    "policy_chizhoulist2_callback",
    "policy_chizhoulist3_callback",
    "policy_chizhouarticle_callback",
    "policy_chizhouarticle_etl_callback",
    "policy_bozhoulist1_callback",
    "policy_bozhoulist2_callback",
    "policy_bozhouarticle_callback",
    "policy_bozhouarticle_etl_callback",
    "policy_planhainanlist_callback",
    "policy_planhainanarticle_callback",
    "policy_planhainanarticle_etl_callback",
    "policy_iitbhainanlist_callback",
    "policy_iitbhainanarticle_callback",
    "policy_iitbhainanarticle_etl_callback",
    "policy_dosthainanlist1_callback",
    "policy_dosthainanlist2_callback",
    "policy_dosthainanarticle_callback",
    "policy_dosthainanarticle_etl_callback",
    "policy_eduhainanlist1_callback",
    "policy_eduhainanlist2_callback",
    "policy_eduhainanarticle_callback",
    "policy_eduhainanarticle_etl_callback",
    "policy_mzhainanlist_callback",
    "policy_mzhainanarticle_callback",
    "policy_mzhainanarticle_etl_callback",
    "policy_mofhainanlist_callback",
    "policy_mofhainanarticle_callback",
    "policy_mofhainanarticle_etl_callback",
    "policy_hrsshainanlist_callback",
    "policy_hrsshainanarticle_callback",
    "policy_hrsshainanarticle_etl_callback",
    "policy_agrihainanlist1_callback",
    "policy_agrihainanlist2_callback",
    "policy_agrihainanarticle_callback",
    "policy_agrihainanarticle_etl_callback",
    "policy_zjthainanlist_callback",
    "policy_zjthainanarticle_callback",
    "policy_zjthainanarticle_etl_callback",
    "policy_wsthainanlist1_callback",
    "policy_wsthainanlist2_callback",
    "policy_wsthainanlist3_callback",
    "policy_wsthainanarticle_callback",
    "policy_wsthainanarticle_etl_callback",
    "policy_haikoulist_callback",
    "policy_haikouarticle_callback",
    "policy_haikouarticle_etl_callback",
    "policy_sanyalist_callback",
    "policy_sanyaarticle_callback",
    "policy_sanyaarticle_etl_callback",
    "policy_sanshalist_callback",
    "policy_sanshaarticle_callback",
    "policy_sanshaarticle_etl_callback",
    "policy_danzhoulist1_callback",
    "policy_danzhoulist2_callback",
    "policy_danzhouarticle_callback",
    "policy_danzhouarticle_etl_callback",
    "policy_drcjiangxilist_callback",
    "policy_drcjiangxiarticle_callback",
    "policy_drcjiangxiarticle_etl_callback",
    "policy_jxciitlist_callback",
    "policy_jxciitlist1_callback",
    "policy_jxciitarticle_callback",
    "policy_jxciitarticle_etl_callback",
    "policy_kjtjiangxilist_callback",
    "policy_kjtjiangxiarticle_callback",
    "policy_kjtjiangxiarticle_etl_callback",
    "policy_jytjiangxilist1_callback",
    "policy_jytjiangxilist2_callback",
    "policy_jytjiangxilist3_callback",
    "policy_jytjiangxiarticle_callback",
    "policy_jytjiangxiarticle_etl_callback",
    "policy_mztjiangxilist_callback",
    "policy_mztjiangxiarticle_callback",
    "policy_mztjiangxiarticle_etl_callback",
    "policy_jxfjiangxilist_callback",
    "policy_jxfjiangxiarticle_callback",
    "policy_jxfjiangxiarticle_etl_callback",
    "policy_rstjiangxilist_callback",
    "policy_rstjiangxiarticle_callback",
    "policy_rstjiangxiarticle_etl_callback",
    "policy_nyncjiangxilist_callback",
    "policy_nyncjiangxiarticle_callback",
    "policy_nyncjiangxiarticle_etl_callback",
    "policy_jxjstlist_callback",
    "policy_jxjstarticle_callback",
    "policy_jxjstarticle_etl_callback",
    "policy_hcjiangxilist_callback",
    "policy_hcjiangxiarticle_callback",
    "policy_hcjiangxiarticle_etl_callback",
    "policy_nclist_callback",
    "policy_nclist1_callback",
    "policy_ncarticle_callback",
    "policy_ncarticle_etl_callback",
    "policy_jianlist_callback",
    "policy_jianarticle_callback",
    "policy_jianarticle_etl_callback",
    "policy_jiujianglist_callback",
    "policy_jiujiangarticle_callback",
    "policy_jiujiangarticle_etl_callback",
    "policy_zgsrlist_callback",
    "policy_zgsrarticle_callback",
    "policy_zgsrarticle_etl_callback",
    "policy_jxfzlist_callback",
    "policy_jxfzarticle_callback",
    "policy_jxfzarticle_etl_callback",
    "policy_yichunlist_callback",
    "policy_yichunarticle_callback",
    "policy_yichunarticle_etl_callback",
    "policy_ganzhoulist1_callback",
    "policy_ganzhoulist2_callback",
    "policy_ganzhouarticle_callback",
    "policy_ganzhouarticle_etl_callback",
    "policy_xinyulist_callback",
    "policy_xinyuarticle_callback",
    "policy_xinyuarticle_etl_callback",
    "policy_yingtanlist_callback",
    "policy_yingtanlist1_callback",
    "policy_yingtanarticle_callback",
    "policy_yingtanarticle_etl_callback",
    "policy_drcgdlist1_callback",
    "policy_drcgdlist2_callback",
    "policy_drcgdarticle_callback",
    "policy_drcgdarticle_etl_callback",
    "policy_gdiigdlist_callback",
    "policy_gdiigdarticle_callback",
    "policy_gdiigdarticle_etl_callback",
    "policy_gdstcgdlist1_callback",
    "policy_gdstcgdlist2_callback",
    "policy_gdstcgdarticle_callback",
    "policy_gdstcgdarticle_etl_callback",
    "policy_edugdlist1_callback",
    "policy_edugdlist2_callback",
    "policy_edugdarticle_callback",
    "policy_edugdarticle_etl_callback",
    "policy_smztgdlist1_callback",
    "policy_smztgdlist2_callback",
    "policy_smztgdarticle_callback",
    "policy_smztgdarticle_etl_callback",
    "policy_cztgdlist1_callback",
    "policy_cztgdlist2_callback",
    "policy_cztgdarticle_callback",
    "policy_cztgdarticle_etl_callback",
    "policy_hrssgdlist1_callback",
    "policy_hrssgdlist2_callback",
    "policy_hrssgdarticle_callback",
    "policy_hrssgdarticle_etl_callback",
    "policy_daragdlist1_callback",
    "policy_daragdlist2_callback",
    "policy_daragdarticle_callback",
    "policy_daragdarticle_etl_callback",
    "policy_zfcxjstgdlist1_callback",
    "policy_zfcxjstgdlist2_callback",
    "policy_zfcxjstgdarticle_callback",
    "policy_zfcxjstgdarticle_etl_callback",
    "policy_wsjkwgdlist1_callback",
    "policy_wsjkwgdlist2_callback",
    "policy_wsjkwgdarticle_callback",
    "policy_wsjkwgdarticle_etl_callback",
    "policy_gzlist1_callback",
    "policy_gzlist2_callback",
    "policy_gzarticle_callback",
    "policy_gzarticle_etl_callback",
    "policy_szlist1_callback",
    "policy_szlist_callback",
    "policy_szarticle_callback",
    "policy_szarticle_etl_callback",
    "policy_zhuhailist_callback",
    "policy_zhuhaiarticle_callback",
    "policy_zhuhaiarticle_etl_callback",
    "policy_shantoulist1_callback",
    "policy_shantoulist2_callback",
    "policy_shantouarticle_callback",
    "policy_shantouarticle_etl_callback",
    "policy_foshanlist1_callback",
    "policy_foshanlist2_callback",
    "policy_foshanarticle_callback",
    "policy_foshanarticle_etl_callback",
    "policy_sglist_callback",
    "policy_sgarticle_callback",
    "policy_sgarticle_etl_callback",
    "policy_heyuanlist_callback",
    "policy_heyuanarticle_callback",
    "policy_heyuanarticle_etl_callback",
    "policy_meizhoulist1_callback",
    "policy_meizhoulist2_callback",
    "policy_meizhouarticle_callback",
    "policy_meizhouarticle_etl_callback",
    "policy_huizhoulist1_callback",
    "policy_huizhoulist2_callback",
    "policy_huizhouarticle_callback",
    "policy_huizhouarticle_etl_callback",
    "policy_shanweilist1_callback",
    "policy_shanweilist2_callback",
    "policy_shanweiarticle_callback",
    "policy_shanweiarticle_etl_callback",
    "policy_dglist1_callback",
    "policy_dglist2_callback",
    "policy_dgarticle_callback",
    "policy_dgarticle_etl_callback",
    "policy_zslist1_callback",
    "policy_zslist2_callback",
    "policy_zsarticle_callback",
    "policy_zsarticle_etl_callback",
    "policy_jiangmenlist1_callback",
    "policy_jiangmenlist2_callback",
    "policy_jiangmenarticle_callback",
    "policy_jiangmenarticle_etl_callback",
    "policy_yangjianglist_callback",
    "policy_yangjiangarticle_callback",
    "policy_yangjiangarticle_etl_callback",
    "policy_zhanjianglist1_callback",
    "policy_zhanjianglist2_callback",
    "policy_zhanjiangarticle_callback",
    "policy_zhanjiangarticle_etl_callback",
    "policy_maominglist1_callback",
    "policy_maominglist2_callback",
    "policy_maomingarticle_callback",
    "policy_maomingarticle_etl_callback",
    "policy_zhaoqinglist1_callback",
    "policy_zhaoqinglist2_callback",
    "policy_zhaoqingarticle_callback",
    "policy_zhaoqingarticle_etl_callback",
    "policy_gdqylist_callback",
    "policy_gdqyarticle_callback",
    "policy_gdqyarticle_etl_callback",
    "policy_chaozhoulist_callback",
    "policy_chaozhouarticle_callback",
    "policy_chaozhouarticle_etl_callback",
    "policy_jieyanglist1_callback",
    "policy_jieyanglist2_callback",
    "policy_jieyangarticle_callback",
    "policy_jieyangarticle_etl_callback",
    "policy_yunfulist1_callback",
    "policy_yunfulist2_callback",
    "policy_yunfuarticle_callback",
    "policy_yunfuarticle_etl_callback",


    "policy_hefeiarticle_etl_callback",
    "policy_wuhuarticle_etl_callback",
    "policy_bengbuarticle_etl_callback",
    "policy_tlarticle_etl_callback",
    "policy_jdzarticle_etl_callback",
    "policy_pingxiangarticle_etl_callback",
]


def clean_text(text):
    """Return the text after the last half/full-width colon, stripped.

    Used to drop field labels such as "发布日期：..." from scraped values.
    Returns '' when *text* is None; text without a colon is returned
    unchanged (stripped).
    """
    if text is None:
        return ""
    # Greedy '.*' keeps only what follows the final colon on each line.
    text = re.sub(r".*[:：](.*)", r"\1", text)
    return text.strip()


def clean_pubdate(value):
    """Normalize a scraped date string to an 8-char 'YYYYMMDD' string.

    Strips every non-digit, truncates/zero-pads to 8 characters, then blanks
    out an impossible month (>12) or day (>31). Returns '' for empty input.
    """
    if not value:
        return ''
    digits = re.sub(r'\D', '', value)
    digits = digits[:8].ljust(8, '0')
    if int(digits[4:6]) > 12:
        # Month is implausible: keep the year only.
        digits = digits[:4] + '0000'
    if int(digits[6:]) > 31:
        # Day is implausible: keep year + month only.
        digits = digits[:6] + '00'
    return digits


def cleaned(value):
    """Collapse a scraped value (str or list of str) into one stripped string.

    Lists are space-joined after stripping each part; falsy input yields ''.
    """
    if not value:
        return ""
    if isinstance(value, list):
        return ' '.join(part.strip() for part in value).strip()
    return value.strip()


# Substring fragments that identify junk / non-document links (scripts, mail
# links, anchors, search engines, social media, CJK punctuation bleed-through).
_JUNK_SUBSTRINGS = (
    'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
    '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
    'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
    'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
    'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
)

# Full-URL suffixes that point at index pages or non-attachment targets.
_JUNK_URL_SUFFIXES = ('/', '.net', '.asp', '.shtml', '/share', '.exe', '.xml',
                      'pdf}', 'jpg}')

# Suffixes of the (lowercased) last path segment that indicate an HTML page
# or bare domain rather than a downloadable attachment.
_JUNK_END_SUFFIXES = ('.htm', '.shtml', '.jhtml', '.org', 'xhtml', '.phtml',
                      '.cn', '.com', '.html', '.mht', '.html%20')


def judge_url(url):
    """Return True when *url* should be skipped as an attachment candidate.

    Filters out overly long URLs, scheme-only/anchor/script/mail links,
    search-engine and social-media links, URLs polluted by surrounding CJK
    punctuation, and plain HTML page links — only real attachments pass.
    """
    if len(url) > 500:
        return True
    # A URL with no path separator beyond the scheme's '//' is not a file link.
    if '/' not in url.replace('//', ''):
        return True
    if any(frag in url for frag in _JUNK_SUBSTRINGS):
        return True
    if url.endswith(_JUNK_URL_SUFFIXES):
        return True
    ends = url.split('/')[-1].lower()
    if not ends:
        return True
    if ends.endswith(_JUNK_END_SUFFIXES):
        return True
    # Short '.jsp' style dynamic pages are not downloadable attachments.
    if '.jsp' in ends and len(ends.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect attachment and embedded-resource links from the fulltext node.

    Walks every ``<a href>`` and every ``@src`` attribute under *xpath*,
    resolves each against the article's provider_url, drops junk links
    (judge_url) and duplicates, and returns a list of dicts with keys
    ``url``, ``name``, ``pub_year`` and ``keyid``.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    seen = list()
    file_info = list()

    # Anchor links: name comes from the anchor's text content.
    for tag in res.xpath(f'{xpath}//a'):
        href = tag.xpath('@href').extract_first()
        if not href or not href.strip():
            continue
        try:
            full_url = parse.urljoin(base_url, href.strip())
        except:
            continue
        if judge_url(full_url) or full_url in seen:
            continue
        seen.append(full_url)
        name = ''.join(tag.xpath('.//text()').extract()).strip()
        file_info.append({'url': full_url, 'name': name, 'pub_year': pub_year, 'keyid': keyid})

    # Embedded resources (images etc.): name falls back to the raw src value.
    for src in res.xpath(f'{xpath}//*/@src').extract():
        src = src.strip()
        if not src:
            continue
        full_url = parse.urljoin(base_url, src)
        if judge_url(full_url) or full_url in seen:
            continue
        seen.append(full_url)
        file_info.append({'url': full_url, 'name': src, 'pub_year': pub_year, 'keyid': keyid})

    return file_info


def deal_sql_dict(sql_dict):
    """Strip DB bookkeeping columns from a sql_model dict, in place.

    Removes the columns managed by the database/scheduler so the remaining
    dict can be re-inserted as a fresh task row. Missing keys are ignored
    (the original eight bare ``pop`` calls raised KeyError on any absent
    column). Returns the same, mutated dict for convenience.
    """
    for key in ("id", "update_time", "create_time", "null_dicts",
                "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(key, None)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base metadata record shared by every policy article.

    Fixed fields (sub_db, provider, source_type, vision, country, language,
    is_deprecated) are constants; latest_date is the YYYYMMDD prefix of
    *down_date_str* and batch is the full timestamp string.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the fulltext record stored alongside the metadata record.

    The fulltext is stored inline (fulltext_txt) as HTML, with a synthetic
    filename derived from *lngid*; addr/size are left empty.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


# 安徽省发展和改革委员会 (Anhui Provincial Development and Reform Commission)
def policy_fzggwahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui DRC "listnews" layout.

    Parses one list page: on the first page it schedules every remaining
    page of the listing (befor_dicts), and on every page it emits one
    article-fetch task per list item (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields copied onto every emitted task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The total page count is embedded in an inline pager <script>.
        page_info = res.css('div div[class="listnews"] script').get()
        if page_info:
            max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only page 1 fans out the follow-up list-page tasks, so each
        # remaining page is scheduled exactly once.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is loaded but never used here — confirm
            # before removing.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.css('div div[class="listnews"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            # Emitted rows run under the *next* stage's tag (article fetch).
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid: "<parent-dir>_<basename-without-.html>" of the article URL.
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99292'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.css('span[class="right date"]::text').get().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggwahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui DRC "#xxgk_nav_con" layout.

    Same pattern as policy_fzggwahlist1_callback but reads the page count
    with a regex over the raw HTML and uses different list-item selectors.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields copied onto every emitted task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # page_info = res.css('div div[class="listnews"] script').get()
        # Pager page count is scraped straight from the raw HTML.
        page_info = re.findall("pageCount:(\d+),", para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only page 1 fans out the follow-up list-page tasks.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is loaded but never used here — confirm
            # before removing.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.css('#xxgk_nav_con ul li')
        for li in li_list:
            temp = info_dicts.copy()
            # Emitted rows run under the *next* stage's tag (article fetch).
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::text').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid: "<parent-dir>_<basename-without-.html>" of the article URL.
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99292'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.css('span[class="date"]::text').get().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    # print(result)

    return result


def policy_fzggwaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for the Anhui DRC: nothing extra to schedule."""
    return DealModel()


def policy_fzggwaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anhui DRC policy articles.

    Parses the downloaded article page, extracts the policy metadata from the
    second "table_suoyin" index table, assembles the policy record and its
    fulltext record (save_data), and writes any attachment/image links found
    inside the fulltext node back onto the task row (other_dicts).

    Fixes vs. the previous version: removed a leftover debug ``print(lngid)``
    and gave the previously bare ``raise Exception`` a diagnostic message.

    Raises:
        Exception: when no fulltext node can be found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Two known layouts: "gk_title" heading with #zoom body, or
    # "newstitle" heading with a .con_main body.
    title = ''.join(res.xpath('//h1[@class="gk_title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
        fulltext_xpath = '//div[@class="con_main"]'
    else:
        fulltext_xpath = '//div[@id="zoom"]'
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Each metadata value sits in the <td> sibling of a labelled <th> in the
    # second index table; labels are matched by characteristic characters
    # because the full label text contains variable spacing.
    pub_no_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"文") and contains(text(),"号")]')
    pub_no = ''.join(pub_no_before.xpath('../td/text()').extract()).strip()
    index_no_before = res.xpath('//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"索")]')
    index_no = ''.join(index_no_before.xpath('../td/text()').extract()).strip()
    subject_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"主") and contains(text(),"类")]')
    subject = ''.join(subject_before.xpath('../td/text()').extract()).strip()
    written_date_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"成") and contains(text(),"期")]')
    written_date = ''.join(written_date_before.xpath('../td/text()').extract()).strip()

    legal_status_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"有")]')
    legal_status = ''.join(legal_status_before.xpath('../td/text()').extract()).strip()
    impl_date_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"生") and contains(text(),"时")]')
    impl_date = ''.join(impl_date_before.xpath('../td/text()').extract()).strip()

    invalid_date_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"废") and contains(text(),"时")]')
    invalid_date = ''.join(invalid_date_before.xpath('../td/text()').extract()).strip()
    keyword_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"关") and contains(text(),"键")]')
    keyword = ''.join(keyword_before.xpath('../td/text()').extract()).strip()

    organ_before = res.xpath(
        '//table[contains(@class, "table_suoyin")][2]//th[contains(text(),"发") and contains(text(),"构")]')
    organ = ''.join(organ_before.xpath('../td/text()').extract()).strip()
    # The site abbreviates the issuing organ to "省..."; prefix the province.
    if organ.startswith('省'):
        organ = '安徽' + organ

    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext node not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99292'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FZGGWAH"
    zt_provider = "fzggwahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment links (if any) onto the article task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省经济和信息化厅 (Anhui Department of Economy and Information Technology)
def policy_jxahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui Economy & IT dept "navjz" layout.

    Parses one list page: on the first page it schedules every remaining
    page of the listing (befor_dicts), and on every page it emits one
    article-fetch task per list item (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Common fields copied onto every emitted task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # page_info = res.css('div div[class="listnews"] script').get()
        # Pager page count is scraped straight from the raw HTML.
        page_info = re.findall("pageCount:(\d+),", para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only page 1 fans out the follow-up list-page tasks.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is loaded but never used here — confirm
            # before removing.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.css('div div[class="navjz clearfix"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            # Emitted rows run under the *next* stage's tag (article fetch).
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid: "<parent-dir>_<basename-without-.html>" of the article URL.
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99291'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = li.css('span[class="right date"]::text').get().replace('发布日期：', '').replace('[',
                                                                                                                   '').replace(
                ']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 2) for the jxah policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article link found on the page becomes an
    article-level task (``next_dicts``).

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#xxgk_nav_con ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename without .html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99291'
            article_json = {
                "url": url,
                "title": li.css('a::text').get(),
                # solve_replace tolerates a missing date span (returns "").
                "pub_date": solve_replace(li.css('span[class="date"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jxaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for jxah: returns an empty DealModel
    (nothing extra to schedule at this stage)."""
    return DealModel()


def _jxah_meta(res, *needles):
    """Return the <td> text of the row in the second "table_suoyin" table
    whose <th> contains every string in *needles* ('' when absent)."""
    cond = " and ".join('contains(text(),"{}")'.format(n) for n in needles)
    row_th = res.xpath('//table[contains(@class, "table_suoyin")][2]//th[{}]'.format(cond))
    return ''.join(row_th.xpath('../td/text()').extract()).strip()


def policy_jxaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jxah article pages.

    Extracts metadata and the fulltext container from the fetched html,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows, and
    writes any attachment info back onto the source task row.

    :raises Exception: when no known fulltext container matches.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="wztit"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = _jxah_meta(res, "文", "号")
    index_no = _jxah_meta(res, "索")
    subject = _jxah_meta(res, "息", "类")
    written_date = _jxah_meta(res, "成", "期")
    legal_status = _jxah_meta(res, "有")
    keyword = _jxah_meta(res, "关", "键")
    organ = _jxah_meta(res, "发", "构")
    # The site abbreviates the province; restore it for provincial organs.
    if organ.startswith('省'):
        organ = '安徽' + organ

    # Try the known fulltext containers in order; keep the xpath that
    # matched so attachments can be looked up inside the same container.
    fulltext = None
    for fulltext_xpath in ('//div[@class="gkwz_content"]',
                           '//div[contains(@class, "wzcon")]',
                           '//div[@id="con_con"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("fulltext container not found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99291'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXAH"
    zt_provider = "jxahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is written back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省科学技术厅
def policy_kjtahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 1) for the Anhui Science & Technology
    Department policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article link found on the page becomes an
    article-level task (``next_dicts``).

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div div[class="ls-cur-navjz clearfix"] ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename stripped of extension/query junk>"
            tail = rawid_list[-1]
            for junk in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(junk, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99293'
            # Guard against a missing date span ("" instead of AttributeError).
            date_text = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a::attr(title)').get(),
                "pub_date": date_text.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def solve_replace(text, rep_text, target_text=""):
    """Replace *rep_text* with *target_text* in *text* and strip whitespace.

    Returns "" when *text* is None, so callers can feed it the result of a
    selector ``.get()`` without a None check.
    """
    if text is None:
        return ""
    return text.replace(rep_text, target_text).strip()


def policy_kjtahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 2, table layout) for the Anhui Science &
    Technology Department policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article row in the table becomes an
    article-level task (``next_dicts``) carrying the per-row metadata.

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for row in res.css('div[class="zcwjbox"] tr[class="xxgk_nav_con"]'):
            url = row.css('td[class="info"] a[class="title"]::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename stripped of extension/query junk>"
            tail = rawid_list[-1]
            for junk in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(junk, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99293'
            article_json = {
                "url": url,
                "title": row.css('td[class="info"] a[class="title"]::text').get(),
                "pub_date": solve_replace(row.css('td[class="fbrq"]::text').get(), '发布日期：'),
                "written_date": solve_replace(row.css('td[class="cwrq"]::text').get(), '成文日期：'),
                # NOTE(review): stripping '发布日期：' from the fwrq (doc-number)
                # cell looks copy-pasted — confirm against the live page.
                "pub_no": solve_replace(row.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_kjtahlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 3, "#zdgkList" layout) for the Anhui
    Science & Technology Department policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article link found on the page becomes an
    article-level task (``next_dicts``).

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#zdgkList ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename stripped of extension/query junk>"
            tail = rawid_list[-1]
            for junk in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(junk, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99293'
            article_json = {
                "url": url,
                "title": li.css('a::text').get(),
                # solve_replace tolerates a missing date span (returns "").
                "pub_date": solve_replace(li.css('span[class="date"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_kjtaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download callback for kjtah: returns an empty DealModel
    (nothing extra to schedule at this stage)."""
    return DealModel()


def _kjtah_meta(res, *needles):
    """Return the <td> text of the row in the second "table_suoyin" table
    whose <th> contains every string in *needles* ('' when absent)."""
    cond = " and ".join('contains(text(),"{}")'.format(n) for n in needles)
    row_th = res.xpath('//table[contains(@class, "table_suoyin")][2]//th[{}]'.format(cond))
    return ''.join(row_th.xpath('../td/text()').extract()).strip()


def policy_kjtaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for kjtah article pages.

    Extracts metadata and the fulltext container from the fetched html,
    builds the ``policy_latest`` / ``policy_fulltext_latest`` rows, and
    writes any attachment info back onto the source task row.

    :raises Exception: when no known fulltext container matches.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Doc number from the metadata table, else the value saved by the
    # list-page callback (table-layout lists carry it in article_json).
    pub_no = _kjtah_meta(res, "文", "号")
    if not pub_no:
        pub_no = article_json.get("pub_no", "")
    index_no = _kjtah_meta(res, "索")
    subject = _kjtah_meta(res, "主", "类")
    # written_date is only available from the list page for this site.
    written_date = article_json.get("written_date", "")
    impl_date = _kjtah_meta(res, "生", "期")
    keyword = _kjtah_meta(res, "关")
    organ = _kjtah_meta(res, "发", "构")
    # The site abbreviates the province; restore it for provincial organs.
    if organ.startswith('省'):
        organ = '安徽' + organ

    # Try the known fulltext containers in order; keep the xpath that
    # matched so attachments can be looked up inside the same container.
    fulltext = None
    for fulltext_xpath in ('//font[@id="Zoom"]',
                           '//div[@class="newscontnet-box"]',
                           '//div[@class="ls-content-con"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("fulltext container not found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99293'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTAH"
    zt_provider = "kjtahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['subject'] = subject
    data['keyword'] = keyword

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is written back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省教育厅
def policy_jytahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 1) for the Anhui Education Department
    policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article link found on the page becomes an
    article-level task (``next_dicts``).

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div div[class="listnews"] ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename stripped of extension/query junk>"
            tail = rawid_list[-1]
            for junk in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(junk, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99294'
            # Guard against a missing date span ("" instead of AttributeError).
            date_text = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a::attr(title)').get(),
                "pub_date": date_text.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jytahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 2, table layout) for the Anhui Education
    Department policy list.

    On page 1 the remaining list pages are fanned out as new list tasks
    (``befor_dicts``); every article row in the table becomes an
    article-level task (``next_dicts``) carrying the per-row metadata.

    :param callmodel: platform callback context (fetched html + task row).
    :return: DealModel carrying the rows to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script ("pageCount:N,").
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules pages 2..N, so the range is queued once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to fail fast on malformed list_json; value unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for row in res.css('div[class="zcwjbox"] tr[class="xxgk_nav_con"]'):
            url = row.css('td[class="info"] a[class="title"]::attr(href)').get()
            # Skip rows without an href or that do not point at an htm(l) page.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<filename stripped of extension/query junk>"
            tail = rawid_list[-1]
            for junk in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(junk, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99294'
            article_json = {
                "url": url,
                "title": row.css('td[class="info"] a[class="title"]::text').get(),
                "pub_date": solve_replace(row.css('td[class="fbrq"]::text').get(), '发布日期：'),
                "written_date": solve_replace(row.css('td[class="cwrq"]::text').get(), '成文日期：'),
                # NOTE(review): stripping '发布日期：' from the fwrq (doc-number)
                # cell looks copy-pasted — confirm against the live page.
                "pub_no": solve_replace(row.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)
    return result


def policy_jytahlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 安徽省教育厅 (list type 3).

    Parses the fetched list HTML in ``callmodel.para_dicts["data"]["1_1"]``:
    on the first page it schedules the remaining list pages (befor_dicts),
    and for every article link it queues a next-stage article task
    (next_dicts).

    :param callmodel: platform callback model carrying the fetched pages,
        the originating SQL row and the redis task info.
    :return: DealModel with befor_dicts/next_dicts insert lists populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Page count is embedded in an inline JS snippet: "pageCount:<n>,".
        # A raw string avoids the invalid "\d" escape-sequence warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages, so the range
            # is scheduled exactly once per task.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="xxgk_nav_con"] ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without a usable article link.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            # rawid = "<parent dir>_<file name minus suffix/tracking query>".
            tail = rawid_list[-1]
            for suffix in (".shtml", ".html", ".htm", "?tdsourcetag=s_pcqq_aiomsg"):
                tail = tail.replace(suffix, "")
            temp["rawid"] = "{}_{}".format(rawid_list[-2], tail)
            temp["sub_db_id"] = '99294'
            # Guard against a missing date node instead of raising AttributeError.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a::text').get(),
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 安徽省教育厅: no deal work is needed here
    (parsing happens in the ETL callback), so return an empty DealModel."""
    return DealModel()


def policy_jytaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 安徽省教育厅 policy article pages.

    Extracts the title, document metadata (pub_no, index_no, subject, dates,
    organ, keyword, legal_status) and the fulltext HTML from the fetched
    article page, builds rows for the policy_latest / policy_fulltext_latest
    tables and records attachment info back onto the source row.

    :param callmodel: platform callback model with the fetched page and the
        originating SQL row.
    :return: EtlDealModel carrying save_data and an other_dicts update.
    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(th_condition):
        """Text of the <td> cells sharing a row with the matched <th> in the
        second "table_suoyin" metadata table."""
        th = res.xpath('//table[contains(@class, "table_suoyin")][2]//th[%s]' % th_condition)
        return ''.join(th.xpath('../td/text()').extract()).strip()

    title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()

    # Labels are matched character-by-character because the site pads them
    # with whitespace/fillers between the characters.
    pub_no = _meta('contains(text(),"文") and contains(text(),"号")') or article_json.get("pub_no", "")
    index_no = _meta('contains(text(),"索")')
    subject = _meta('contains(text(),"内") and contains(text(),"类")')
    written_date = _meta('contains(text(),"成") and contains(text(),"期")') or article_json.get("written_date", "")
    legal_status = _meta('contains(text(),"有")')
    keyword = _meta('contains(text(),"关")')
    organ = _meta('contains(text(),"发") and contains(text(),"构")')
    if organ.startswith('省'):
        # The site omits the province prefix; normalize to the full organ name.
        organ = '安徽' + organ

    # Primary fulltext container, with one known fallback layout.
    fulltext_xpath = '//div[@class="minh500 clearfix"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99294'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "JYTAH", "jytahgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省民政厅
def policy_mzahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 安徽省民政厅 (list type 1).

    Parses the fetched list HTML in ``callmodel.para_dicts["data"]["1_1"]``:
    on the first page it schedules the remaining list pages (befor_dicts),
    and for every article link it queues a next-stage article task
    (next_dicts).

    :param callmodel: platform callback model carrying the fetched pages,
        the originating SQL row and the redis task info.
    :return: DealModel with befor_dicts/next_dicts insert lists populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Page count is embedded in an inline JS snippet: "pageCount:<n>,".
        # A raw string avoids the invalid "\d" escape-sequence warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages, so the range
            # is scheduled exactly once per task.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div div[class="navjz clearfix"] ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without a usable article link.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99295'
            # Guard against a missing date node instead of raising AttributeError.
            pub_date_raw = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a::attr(title)').get(),
                "pub_date": pub_date_raw.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 安徽省民政厅 (list type 2, "#zdgkList" layout).

    Parses the fetched list HTML in ``callmodel.para_dicts["data"]["1_1"]``:
    on the first page it schedules the remaining list pages (befor_dicts),
    and for every article link it queues a next-stage article task
    (next_dicts).

    :param callmodel: platform callback model carrying the fetched pages,
        the originating SQL row and the redis task info.
    :return: DealModel with befor_dicts/next_dicts insert lists populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Page count is embedded in an inline JS snippet: "pageCount:<n>,".
        # A raw string avoids the invalid "\d" escape-sequence warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages, so the range
            # is scheduled exactly once per task.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#zdgkList ul li'):
            url = li.css('a[class="title"]::attr(href)').get()
            # Skip rows without a usable article link.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99295'
            # Guard against a missing date node instead of raising AttributeError.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a[class="title"]::text').get(),
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 安徽省民政厅: no deal work is needed here
    (parsing happens in the ETL callback), so return an empty DealModel."""
    return DealModel()


def policy_mzaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 安徽省民政厅 policy article pages.

    Extracts the title, document metadata (pub_no, index_no, subject, dates,
    organ, keyword, legal_status) and the fulltext HTML from the fetched
    article page, builds rows for the policy_latest / policy_fulltext_latest
    tables and records attachment info back onto the source row.

    :param callmodel: platform callback model with the fetched page and the
        originating SQL row.
    :return: EtlDealModel carrying save_data and an other_dicts update.
    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(label):
        """Value of the <td> immediately following the metadata <th> whose
        string value contains *label* in the "table_suoyin" table."""
        th = res.xpath(
            '//table[contains(@class, "table_suoyin")]//th[contains(string(),"%s")]' % label)
        return ''.join(th.xpath('./following-sibling::td[1]/text()').extract()).strip()

    title = ''.join(res.xpath('//h1[@class="wztit"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()

    pub_no = _meta('文号')
    index_no = _meta('索引')
    subject = _meta('内容分类')
    written_date = _meta('成文日期')
    legal_status = _meta('有效性')
    keyword = _meta('关键词')
    organ = _meta('发布机构')
    if organ.startswith('省'):
        # The site omits the province prefix; normalize to the full organ name.
        organ = '安徽' + organ

    # Primary fulltext container, with one known fallback layout.
    fulltext_xpath = '//div[@class="con_mainline"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="wenzhang"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99295'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "MZAH", "mzahgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省财政厅
def policy_cztahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 安徽省财政厅 (list type 1).

    Parses the fetched list HTML in ``callmodel.para_dicts["data"]["1_1"]``:
    on the first page it schedules the remaining list pages (befor_dicts),
    and for every article link it queues a next-stage article task
    (next_dicts).

    :param callmodel: platform callback model carrying the fetched pages,
        the originating SQL row and the redis task info.
    :return: DealModel with befor_dicts/next_dicts insert lists populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Page count is embedded in an inline JS snippet: "pageCount:<n>,".
        # A raw string avoids the invalid "\d" escape-sequence warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages, so the range
            # is scheduled exactly once per task.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="ls-cur-navjz clearfix"] ul li'):
            url = li.css('a::attr(href)').get()
            # Skip rows without a usable article link.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99296'
            # Guard against a missing date node instead of raising AttributeError.
            pub_date_raw = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a::attr(title)').get(),
                "pub_date": pub_date_raw.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 安徽省财政厅 (list type 2, "#xxgk_nav_con" layout).

    Parses the fetched list HTML in ``callmodel.para_dicts["data"]["1_1"]``:
    on the first page it schedules the remaining list pages (befor_dicts),
    and for every article link it queues a next-stage article task
    (next_dicts).

    :param callmodel: platform callback model carrying the fetched pages,
        the originating SQL row and the redis task info.
    :return: DealModel with befor_dicts/next_dicts insert lists populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Page count is embedded in an inline JS snippet: "pageCount:<n>,".
        # A raw string avoids the invalid "\d" escape-sequence warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages, so the range
            # is scheduled exactly once per task.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#xxgk_nav_con ul li'):
            url = li.css('a[class="title"]::attr(href)').get()
            # Skip rows without a usable article link.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99296'
            # Guard against a missing date node instead of raising AttributeError.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": li.css('a[class="title"]::text').get(),
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 安徽省财政厅: no deal work is needed here
    (parsing happens in the ETL callback), so return an empty DealModel."""
    return DealModel()


def policy_cztaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 安徽省财政厅 policy article pages.

    Extracts the title, document metadata (pub_no, index_no, subject, dates,
    organ, keyword, legal_status) and the fulltext HTML from the fetched
    article page, builds rows for the policy_latest / policy_fulltext_latest
    tables and records attachment info back onto the source row.

    :param callmodel: platform callback model with the fetched page and the
        originating SQL row.
    :return: EtlDealModel carrying save_data and an other_dicts update.
    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(th_condition):
        """Value of the <td> immediately following the metadata <th> matched
        by *th_condition* inside the "con_main" block; '' when absent."""
        th = res.xpath('//div[contains(@class, "con_main")]//th[%s]' % th_condition)
        value = th.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value else ""

    title = ''.join(res.xpath('//h1[@class="newstitle"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-page time.
        title = article_json['title'].strip()

    # Labels are matched character-by-character because the site pads them
    # with whitespace/fillers between the characters.
    pub_no = _meta('contains(text(),"文") and contains(text(),"号")')
    index_no = _meta('contains(text(),"索") and contains(text(),"引")')
    subject = _meta('contains(text(),"主") and contains(text(),"类")')
    written_date = _meta('contains(text(),"成") and contains(text(),"期")')
    legal_status = _meta('contains(string(),"有效性")')
    keyword = _meta('contains(text(),"关") and contains(text(),"键")')
    organ = _meta('contains(text(),"发") and contains(text(),"布") and contains(text(),"构")')
    if organ.startswith('省'):
        # The site omits the province prefix; normalize to the full organ name.
        organ = '安徽' + organ

    # Primary fulltext container, with one known fallback layout.
    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="con_main"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found: %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99296'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CZTAH", "cztahgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (if any) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省人力资源和社会保障厅
def policy_hrssahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui HRSS site (layout 1).

    Parses the total page count out of the inline pagination script; on the
    first page it enqueues follow-up list pages (2..total) into befor_dicts,
    then extracts every article link on the current page into next_dicts.

    Fixes vs. previous version: raw-string regex (avoids invalid-escape
    warning), removed the dead inner `if page_info` conditional and unused
    locals, and guarded the date span lookup so a <li> without a date no
    longer raises AttributeError.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div div[class="navjz clearfix"] ul li'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99297'
            # Guard: some list items may lack the date span entirely.
            pub_date_raw = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": title,
                "pub_date": pub_date_raw.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui HRSS site (layout 2, #zdgkList).

    Same flow as layout 1: derive total pages from the inline pagination
    script, enqueue follow-up pages on page 1, then collect article links.

    Fixes vs. previous version: raw-string regex, removed dead conditional
    and unused locals, and guarded the date span lookup against None.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#zdgkList ul li'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99297'
            # Guard: some list items may lack the date span entirely.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": title,
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssahlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui HRSS site (layout 3, #zdgkList).

    NOTE(review): this is byte-identical in logic to policy_hrssahlist2_callback;
    the two are kept separate because each task tag is wired to its own callback.

    Fixes vs. previous version: raw-string regex, removed dead conditional
    and unused locals, and guarded the date span lookup against None.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('#zdgkList ul li'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99297'
            # Guard: some list items may lack the date span entirely.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": title,
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Anhui HRSS site.

    Intentionally a no-op: all parsing happens in the ETL callback, so this
    simply returns an empty DealModel.
    """
    return DealModel()


def policy_hrssaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anhui HRSS article pages.

    Extracts the title, the metadata table fields (pub_no, index_no, subject,
    written/impl/invalid dates, legal status, keyword, organ) and the full
    text, then builds the `policy_latest` / `policy_fulltext_latest` rows and
    an other_dicts update carrying attachment info.

    Raises:
        Exception: when no fulltext container can be located in the page.

    Fixes vs. previous version: the eight copy-pasted th->td extraction
    snippets are folded into one local helper, the dead initial title
    assignment is removed, and the bare `raise Exception` now carries a
    diagnostic message.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _th_value(th_xpath):
        """Stripped text of the <td> following the matched <th>, or ''."""
        value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    # Title: try the two known page layouts, fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="gk_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="wztit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each field is a <th> label followed by its value <td>.
    pub_no = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"文") and contains(text(),"号")]')
    index_no = _th_value('//div[contains(@class, "con_main")]//th[contains(string(),"索引")]')
    subject = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"主") and contains(text(),"类")]')
    written_date = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"成") and contains(text(),"期")]')
    legal_status = _th_value('//div[contains(@class, "con_main")]//th[contains(string(),"有效性")]')
    impl_date = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"生") and contains(text(),"效") and contains(text(),"间")]')
    invalid_date = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"废") and contains(text(),"止") and contains(text(),"间")]')
    keyword = _th_value('//div[contains(@class, "con_main")]//th[contains(string(),"关键词")]')
    organ = _th_value('//div[contains(@class, "con_main")]//th[contains(text(),"发") and contains(text(),"布") and contains(text(),"构")]')
    # Pages abbreviate the province: "省..." means "安徽省...".
    if organ.startswith('省'):
        organ = '安徽' + organ

    # Fulltext: primary container, then the alternate layout's container.
    fulltext_xpath = '//div[contains(@class, "xxgkcontent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "wzcon")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99297'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSAH"
    zt_provider = "hrssahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}") back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省农业农村厅
def policy_nyncahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui agriculture dept site (layout 1).

    Parses the total page count out of the inline pagination script; on the
    first page it enqueues follow-up list pages (2..total) into befor_dicts,
    then extracts every article link on the current page into next_dicts.

    Fixes vs. previous version: raw-string regex, removed dead conditional
    and unused locals, and guarded the date span lookup against None.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div div[class="navjz clearfix"] ul li'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99298'
            # Guard: some list items may lack the date span entirely.
            pub_date_raw = li.css('span[class="right date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": title,
                "pub_date": pub_date_raw.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui agriculture dept site (layout 2, xxgk_navli).

    Same flow as layout 1; each entry here is a <ul> whose "mc" item holds the
    link/title and whose "rq" item holds the publish date.

    Fixes vs. previous version: raw-string regex, removed dead conditional and
    unused locals, and guarded both the title and the date lookups so a row
    missing either no longer raises AttributeError.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="xxgk_nav_con"] div[class="xxgk_navli"] ul'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('li[class="mc"] a[class="title"]::attr(href)').get()
            title = li.css('li[class="mc"] a[class="title"]::text').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99298'
            # Guards: title/date cells may be absent on some rows.
            pub_date_raw = li.css('li[class="rq"]::text').get() or ''
            article_json = {
                "url": url,
                "title": (title or '').strip(),
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncahlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Anhui agriculture dept site (layout 3, xxgk_nav_con).

    Same flow as the other list callbacks: derive the total page count from the
    inline pagination script, enqueue follow-up pages on page 1, then collect
    the article links on the current page.

    Fixes vs. previous version: raw-string regex, removed dead conditional
    and unused locals, and guarded the date span lookup against None.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count embedded in an inline script as "pageCount:<n>,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue the remaining list pages as new tasks.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="xxgk_nav_con"] ul li'):
            temp = info_dicts.copy()
            # Next stage runs under the follow-up task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            # Skip separators / non-article anchors.
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            # rawid = "<parent-dir>_<filename-without-.html>"
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99298'
            # Guard: some list items may lack the date span entirely.
            pub_date_raw = li.css('span[class="date"]::text').get() or ''
            article_json = {
                "url": url,
                "title": title,
                "pub_date": pub_date_raw.replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Anhui agriculture dept site.

    Intentionally a no-op: all parsing happens in the ETL callback, so this
    simply returns an empty DealModel.
    """
    return DealModel()


def policy_nyncaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anhui agriculture dept article pages.

    Extracts the title, the "table_suoyin" metadata fields (pub_no, index_no,
    subject, legal status, keyword, organ) and the full text, then builds the
    `policy_latest` / `policy_fulltext_latest` rows and an other_dicts update
    carrying attachment info.

    Raises:
        Exception: when no fulltext container can be located in the page.

    Fixes vs. previous version: the six copy-pasted th->td extraction snippets
    are folded into one local helper, the dead initial title assignment is
    removed, and the bare `raise Exception` now carries a diagnostic message.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _th_value(th_xpath):
        """Stripped text of the <td> following the matched <th>, or ''."""
        value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    # Title: page heading, meta tag when the heading is multi-line,
    # then the list-page title as a last resort.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each field is a <th> label followed by its value <td>.
    pub_no = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(text(),"文") and contains(text(),"号")]')
    index_no = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(string(),"索引")]')
    subject = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(text(),"主") and contains(text(),"类")]')
    legal_status = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(text(),"有") and contains(text(),"效") and contains(text(),"性")]')
    keyword = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(string(),"关键词")]')
    organ = _th_value('//div[contains(@class, "table_suoyin")]//th[contains(text(),"发") and contains(text(),"布") and contains(text(),"构")]')
    # Pages abbreviate the province: "省..." means "安徽省...".
    if organ.startswith('省'):
        organ = '安徽' + organ

    # Fulltext: primary container, then the alternate layout's container.
    fulltext_xpath = '//div[contains(@class, "xxgkcontent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99298'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCAH"
    zt_provider = "nyncahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or "{}") back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省住房和城乡建设厅 (Anhui Department of Housing and Urban-Rural Development)
def policy_dohurdahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 1) for the Anhui DOHURD site.

    On the first page, fans out tasks for the remaining list pages; on every
    page, extracts article links and queues them for the article stage.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Total page count is embedded in an inline pagination script.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining pages, so each page task is
            # inserted exactly once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//div[contains(@class,"listnews")]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            # BUGFIX: the article URL must come from the link's @href, not its
            # text() — the text is the title and never contains a .html path,
            # so the rawid split below produced garbage. Also check for None
            # BEFORE stripping (the old code stripped first and could crash).
            url = li.xpath('div[@class="doc-title"]/a/@href').extract_first()
            if url is None:
                continue
            url = url.strip()
            if 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99299'
            article_json = dict()
            article_json["url"] = url
            title = li.xpath('div[@class="doc-title"]/a/text()').extract_first()
            article_json["title"] = title.strip() if title else ""
            pub_date = li.xpath('div[@class="doc-time"]/span[@class="date"]/text()').extract_first()
            article_json["pub_date"] = pub_date.strip() if pub_date else ""
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dohurdahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 2) for the Anhui DOHURD site.

    Page 1 fans out the remaining list pages (carrying ``list_rawid_alt``
    through list_json); every page queues the article links it contains.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / list_json / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count lives in an inline pagination script; raw string avoids
        # the invalid "\d" escape warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('div[class="xxgk_nav_con"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99299'
            article_json = dict()
            article_json["url"] = url
            # Guard against missing nodes instead of crashing on None.
            article_json["title"] = title.strip() if title else ""
            pub_date = li.css('span[class="date"]::text').get()
            article_json["pub_date"] = pub_date.replace('发布日期：', '').strip() if pub_date else ""
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dohurdaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Anhui DOHURD site.

    Parsing is deferred entirely to the ETL stage, so this returns an empty
    DealModel without touching *callmodel*.
    """
    return DealModel()


def policy_dohurdaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anhui DOHURD article pages.

    Parses the article HTML into policy metadata (title, document number,
    issuing organ, dates, subject, …) plus full text, queues both rows for
    saving, and records attachment info back onto the crawl row.

    :param callmodel: article-stage call model; reads para_dicts['data']
        ['1_1']['html'] and sql_model (rawid / task_tag / task_name /
        article_json).
    :return: EtlDealModel with save_data and the other_dicts update.
    :raises Exception: when no full-text container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _field(th_xpath):
        """Stripped text of the <td> right after the matching <th>, or ''."""
        value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _field('//div[contains(@class, "table_suoyin")]//th[contains(text(),"文") and contains(text(),"号")]')
    index_no = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"索引")]')
    subject = _field('//div[contains(@class, "table_suoyin")]//th[contains(text(),"主") and contains(text(),"类")]')
    written_date = _field('//div[contains(@class, "table_suoyin")]//th[contains(text(),"成") and contains(text(),"文") and contains(text(),"期")]')
    legal_status = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"有效性")]')
    impl_date = _field('//div[contains(@class, "table_suoyin")]//th[contains(text(),"生") and contains(text(),"效") and contains(text(),"间")]')
    keyword = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"关键词")]')
    organ = _field('//div[contains(@class, "table_suoyin")]//th[contains(text(),"发") and contains(text(),"布") and contains(text(),"构")]')
    if organ.startswith('省'):
        # Provincial organs are published without the province prefix.
        organ = '安徽' + organ

    fulltext_xpath = '//div[contains(@class, "xxgkcontent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "newscontnet")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"full text not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99299'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DOHURDAH"
    zt_provider = "dohurdahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the full-text container are written back to the
    # crawl row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省卫生健康委员会 (Anhui Provincial Health Commission)
def policy_wjwahlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 1) for the Anhui Health Commission site.

    On page 1, fans out tasks for the remaining list pages; on every page,
    extracts article links and queues them for the article stage.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count lives in an inline pagination script; raw string avoids
        # the invalid "\d" escape warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('div[class="ls-cur-navjz clearfix"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99300'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = title
            # Strip the "发布日期：" label and surrounding brackets; guard
            # against a missing date node instead of crashing on None.
            pub_date = li.css('span[class="right date"]::text').get()
            article_json["pub_date"] = (pub_date.replace('发布日期：', '')
                                        .replace('[', '').replace(']', '').strip()
                                        if pub_date else "")
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwahlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 2) for the Anhui Health Commission site.

    Parses a table-based listing (one <tr class="xxgk_nav_con"> per article)
    that also carries written-date and document-number columns.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count lives in an inline pagination script; raw string avoids
        # the invalid "\d" escape warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('tr[class="xxgk_nav_con"]'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.css('td[class="info"] a[class="title"]::attr(href)').get()
            title = li.css('td[class="info"] a[class="title"]::text').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99300'
            article_json = dict()
            article_json["url"] = url
            # Guard against missing nodes instead of crashing on None.
            article_json["title"] = title.strip() if title else ""
            pub_date = li.css('td[class="fbrq"]::text').get()
            article_json["pub_date"] = pub_date.replace('发布日期：', '').strip() if pub_date else ""
            article_json["written_date"] = solve_replace(li.css('td[class="cwrq"]::text').get(), '成文日期：')
            # NOTE(review): the pub_no column strips the "发布日期：" label —
            # looks like a copy-paste from the date column; verify against the
            # live page markup before changing it.
            article_json["pub_no"] = solve_replace(li.css('td[class="fwrq"]::text').get(), '发布日期：')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwahlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 3) for the Anhui Health Commission site.

    Page 1 fans out the remaining list pages (carrying ``list_rawid_alt``
    through list_json); every page queues the article links it contains.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / list_json / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count lives in an inline pagination script; raw string avoids
        # the invalid "\d" escape warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('div[class="xxgk_nav_con"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99300'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = title
            # Guard against a missing date node instead of crashing on None.
            pub_date = li.css('span[class="date"]::text').get()
            article_json["pub_date"] = pub_date.replace('发布日期：', '').strip() if pub_date else ""
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwaharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Anhui Health Commission site.

    Parsing is deferred entirely to the ETL stage, so this returns an empty
    DealModel without touching *callmodel*.
    """
    return DealModel()


def policy_wjwaharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anhui Health Commission article pages.

    Parses the article HTML into policy metadata (title, document number,
    issuing organ, dates, subject, …) plus full text, queues both rows for
    saving, and records attachment info back onto the crawl row.

    :param callmodel: article-stage call model; reads para_dicts['data']
        ['1_1']['html'] and sql_model (rawid / task_tag / task_name /
        article_json).
    :return: EtlDealModel with save_data and the other_dicts update.
    :raises Exception: when no full-text container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _field(th_xpath):
        """Stripped text of the <td> right after the matching <th>, or ''."""
        value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _field('//table[contains(@class, "table_suoyin")]//th[contains(string(),"文号")]')
    index_no = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"索引")]')
    subject = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"主题分类")]')
    written_date = _field('//div[contains(@class, "con_main")]//th[contains(string(),"成文日期")]')
    legal_status = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"有效性")]')
    keyword = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"关键词")]')
    organ = _field('//div[contains(@class, "table_suoyin")]//th[contains(string(),"发文机关")]')
    if organ.startswith('省'):
        # Provincial organs are published without the province prefix.
        organ = '安徽' + organ

    fulltext_xpath = '//div[contains(@class, "gkwz_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "wzcon")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"full text not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99300'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWAH"
    zt_provider = "wjwahgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the full-text container are written back to the
    # crawl row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省淮南市 (Huainan City, Anhui Province)
def policy_huainanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (variant 1) for the Huainan municipal site.

    On page 1, fans out tasks for the remaining list pages; on every page,
    extracts article links and queues them for the article stage.

    :param callmodel: list-stage call model; reads para_dicts['data']['1_1']
        ['html'] and sql_model (page_index / task_name / task_tag).
    :return: DealModel carrying the page fan-out and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # Page count lives in an inline pagination script; raw string avoids
        # the invalid "\d" escape warning.
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('div[class="navjz"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None or 'htm' not in url:
                continue

            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99304'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = title
            # Strip the "发布日期：" label and surrounding brackets; guard
            # against a missing date node instead of crashing on None.
            pub_date = li.css('span[class="right date"]::text').get()
            article_json["pub_date"] = (pub_date.replace('发布日期：', '')
                                        .replace('[', '').replace(']', '').strip()
                                        if pub_date else "")
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huainanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Huainan gov policy list page (xxgk table layout).

    On the first page, fan out one follow-up list task per remaining page
    (total pages read from the inline JS pager ``pageCount:N,``). For every
    table row, queue an article task carrying url/title/pub_date/pub_no in
    ``article_json``.

    :param callmodel: wraps the crawled html (``para_dicts``), the source
        task row (``sql_model``) and redis task config (``redis_all``).
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # total page count is embedded in an inline JS pager: "pageCount:N,"
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: queue list tasks for pages 2..total_page
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for row in res.css('tr[class="xxgk_nav_con"]'):
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp.pop("task_tag_next")
            url = row.css('td[class="info"] a:first-of-type::attr(href)').get()
            title = row.css('td[class="info"] a:first-of-type::text').get()
            # skip rows without a detail link or with non-htm targets
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99304'
            article_json = {
                "url": "https://www.huainan.gov.cn" + url,
                "title": title.strip(),
                "pub_date": row.css('td[class="fbrq"]::text').get().replace('发布日期：', '').strip(),
                "pub_no": solve_replace(row.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huainanlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Huainan gov policy list page that comes in two layouts.

    If the page contains ``pageTPList`` it is the tabular layout (title
    cells, no publish date); otherwise it is the ``xxgk_nav_con`` <ul>
    layout with a date span per entry. First-page handling fans out one
    list task per remaining page, preserving the caller-supplied paging
    params (``pagesize``/``length``/``file``) from ``list_json``.

    :param callmodel: wraps the crawled html, the source task row and
        redis task config.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # total page count is embedded in an inline JS pager: "pageCount:N,"
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # follow-up pages keep the original paging parameters
                dic = {"page_info": f"{page}", "pagesize": list_json["pagesize"],
                       "length": list_json["length"], "file": list_json["file"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if "pageTPList" in html:
            # tabular layout: no publish date available on the list page
            for cell in res.css('tr[class="first"] td[class="title"]'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                url = cell.css('a[class="tit"]::attr(href)').get()
                title = cell.css('a[class="tit"]::text').get()
                if url is None or 'htm' not in url:
                    continue
                rawid_list = url.split('/')
                temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
                temp["sub_db_id"] = '99304'
                article_json = {"url": url, "title": title, "pub_date": ""}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # <ul> layout with a "发布日期：" date span per entry
            for item in res.css('div[class="xxgk_nav_con"] ul li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                url = item.css('a[class="title"]::attr(href)').get()
                title = item.css('a[class="title"]::attr(title)').get()
                if url is None or 'htm' not in url:
                    continue
                rawid_list = url.split('/')
                temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
                temp["sub_db_id"] = '99304'
                article_json = {
                    "url": url,
                    "title": title,
                    "pub_date": item.css('span[class="date"]::text').get().replace('发布日期：', '').strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: returns an empty DealModel.

    Field extraction for Huainan articles happens in the ETL callback,
    so nothing needs to be queued or updated at this stage.
    """
    return DealModel()


def _extract_sibling_td(res, th_xpath):
    """Return the stripped text of the <td> right after the <th> matched by
    ``th_xpath``, or "" when the header (or its value cell) is absent."""
    value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
    return value.strip() if value is not None else ""


def policy_huainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a crawled Huainan policy article page into the policy tables.

    Extracts the title (several heading layouts, then a meta tag, then the
    list-page title as fallback), the metadata table (索引号/文号/成文日期/...),
    and the full text, then emits rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an ``other_dicts`` update carrying
    attachment info for the source task row.

    :param callmodel: wraps the crawled html (``para_dicts``) and the
        article task row (``sql_model`` with ``article_json``/``rawid``).
    :return: EtlDealModel with ``save_data`` filled and one update queued
        in ``befor_dicts.update_list``.
    :raises Exception: when no known fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try the known heading layouts in order.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="wztit"]//text()').extract()).strip()
    if '\n' in title:
        # multi-line headings are unreliable; prefer the meta tag
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each field sits in a <td> following its labelled <th>.
    # NOTE: pub_no is the only field anchored on <table>, the rest on <div>.
    pub_no = _extract_sibling_td(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(string(),"文号")]')
    index_no = _extract_sibling_td(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"索引")]')
    subject = _extract_sibling_td(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"内容分类")]')
    written_date = _extract_sibling_td(
        res,
        '//div[contains(@class, "table_suoyin")]//th[contains(text(),"成") and contains(text(),"文") and contains(text(),"期")]')
    legal_status = _extract_sibling_td(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"有效性")]')
    impl_date = _extract_sibling_td(
        res,
        '//div[contains(@class, "table_suoyin")]//th[contains(text(),"生") and contains(text(),"效") and contains(text(),"间")]')
    invalid_date = _extract_sibling_td(
        res,
        '//div[contains(@class, "table_suoyin")]//th[contains(text(),"废") and contains(text(),"止") and contains(text(),"间")]')
    keyword = _extract_sibling_td(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"关键词")]')
    organ = _extract_sibling_td(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"发布机")]')
    if organ.startswith('市'):
        # pages often abbreviate the city: "市XX局" -> "淮南市XX局"
        organ = '淮南' + organ

    # Fulltext: try known container classes in order; keep the matching
    # xpath so the attachment scan below is scoped to the same container.
    fulltext = None
    fulltext_xpath = ""
    for candidate in ('//div[contains(@class, "gkwz_container")]',
                      '//div[contains(@class, "wzcon")]',
                      '//div[contains(@class, "conTx")]'):
        fulltext_xpath = candidate
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"no fulltext container matched for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99304'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUAINAN"
    zt_provider = "huainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info is stored back on the source task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省马鞍山市 (mas.gov.cn)
def policy_maslist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Ma'anshan gov policy list page (``listnews ztlmpic`` layout).

    On the first page, fan out one follow-up list task per remaining page
    (total pages read from the inline JS pager ``pageCount:N,``). For every
    list item, queue an article task carrying url/title/pub_date in
    ``article_json``.

    :param callmodel: wraps the crawled html, the source task row and
        redis task config.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # total page count is embedded in an inline JS pager: "pageCount:N,"
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: queue list tasks for pages 2..total_page
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for item in res.css('div[class="listnews ztlmpic"] ul li'):
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item.css('a::attr(href)').get()
            title = item.css('a::attr(title)').get()
            # skip items without a detail link or with non-htm targets
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99305'
            article_json = {
                "url": url,
                "title": title,
                "pub_date": item.css('p[class="p3"]::text').get()
                    .replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_maslist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Ma'anshan gov policy list page (``search-list`` table layout).

    Total pages come from the "尾页" (last page) pager link's ``&page=``
    query parameter; if the pager link is missing (single-page list) the
    page count falls back to 1 instead of crashing on ``None.split``.
    Rows yield article tasks with url/title/pub_date/written_date/pub_no.

    :param callmodel: wraps the crawled html, the source task row and
        redis task config.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        last_page_href = res.xpath(
            '//div[@id="page_list"]/a/span[contains(text(), "尾页")]/ancestor::a/@href').extract_first()
        # guard: pager link absent on single-page lists -> one page
        page_info = last_page_href.split("&page=")[-1] if last_page_href else ""
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: queue list tasks for pages 2..total_page
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for row in res.css('div[class="search-list"] tr'):
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp.pop("task_tag_next")
            url = row.css('td[class="bt"] a:first-of-type::attr(href)').get()
            title = row.css('td[class="bt"] a:first-of-type::text').get()
            # skip rows without a detail link or with non-htm targets
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99305'
            article_json = {
                "url": "https://www.mas.gov.cn" + url,
                "title": title.strip(),
                "pub_date": row.css('td[class="cwrq"]:last-of-type::text').get().replace('发布日期：', '').strip(),
                "written_date": solve_replace(row.css('td[class="cwrq"]:first-of-type::text').get(), '成文日期：'),
                "pub_no": solve_replace(row.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_maslist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Ma'anshan gov policy list page (``list-right`` <ul> layout).

    Total pages come from the ``page-list`` pager span of the form "X/Y";
    if that span is missing the page count falls back to 1 instead of
    crashing on ``None.split``. List items yield article tasks with
    url/title/pub_date/written_date/pub_no.

    :param callmodel: wraps the crawled html, the source task row and
        redis task config.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        pager_text = res.xpath('//div[@id="page-list"]/span[contains(text(), "/")]/text()').extract_first()
        # guard: pager span absent on single-page lists -> one page
        page_info = pager_text.split("/")[-1] if pager_text else ""
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: queue list tasks for pages 2..total_page
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in res.css('div[class="list-right"] ul li'):
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item.css('a:first-of-type::attr(href)').get()
            title = item.css('a:first-of-type::attr(title)').get()
            # skip items without a detail link or with non-htm targets
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99305'
            article_json = {
                "url": "https://www.mas.gov.cn" + url,
                "title": title,
                # NOTE(review): td-based selectors inside a <li> look copied
                # from the table layout — verify they match this page; if the
                # td is absent .get() returns None and .replace raises.
                "pub_date": item.css('td[class="cwrq"]:last-of-type::text').get().replace('发布日期：', '').strip(),
                "written_date": solve_replace(item.css('td[class="cwrq"]:first-of-type::text').get(), '成文日期：'),
                "pub_no": solve_replace(item.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_maslist4_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Ma'anshan gov policy list page (``search-list`` layout,
    "X/Y" pager variant).

    Total pages come from the ``page_list`` pager span of the form "X/Y";
    if that span is missing the page count falls back to 1 instead of
    crashing on ``None.split``. Relative ``/xxgk`` links are resolved
    against the site root; absolute links are kept as-is.

    :param callmodel: wraps the crawled html, the source task row and
        redis task config.
    :return: DealModel with pagination inserts in ``befor_dicts`` and
        article-level inserts in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        pager_text = res.xpath('//div[@id="page_list"]/span[contains(text(), "/")]/text()').extract_first()
        # guard: pager span absent on single-page lists -> one page
        page_info = pager_text.split("/")[-1].strip() if pager_text else ""
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: queue list tasks for pages 2..total_page
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for row in res.css('div[class="search-list"] tr'):
            temp = info_dicts.copy()
            # the article task runs under the next-stage tag
            temp["task_tag"] = temp.pop("task_tag_next")
            url = row.css('td[class="bt"] a:first-of-type::attr(href)').get()
            title = row.css('td[class="bt"] a:first-of-type::text').get()
            # skip rows without a detail link or with non-htm targets
            if url is None or 'htm' not in url:
                continue
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["sub_db_id"] = '99305'
            article_json = {
                # site-relative xxgk links need the host prefix; others are absolute
                "url": ("https://www.mas.gov.cn" + url) if url.startswith("/xxgk") else url,
                "title": title.strip().strip("|").strip(),
                "pub_date": row.css('td[class="cwrq"]:last-of-type::text').get().replace('发布日期：', '').strip(),
                "written_date": solve_replace(row.css('td[class="cwrq"]:first-of-type::text').get(), '成文日期：'),
                "pub_no": solve_replace(row.css('td[class="fwrq"]::text').get(), '发布日期：'),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_masarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: returns an empty DealModel.

    Field extraction for Ma'anshan articles happens in the ETL callback,
    so nothing needs to be queued or updated at this stage.
    """
    return DealModel()


def policy_masarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ma'anshan (马鞍山) policy article pages.

    Extracts title, metadata fields (pub_no, index_no, subject, dates,
    legal_status, keyword, organ) and the full text from the fetched HTML,
    builds the `policy_latest` / `policy_fulltext_latest` rows, and stores
    attachment info back onto the task row via an update.

    Raises:
        Exception: if none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    # pub_date may be an empty string when the list page carried no date
    pub_year = pub_date[:4] if pub_date else ""
    res = Selector(text=html)

    def _meta(label, sibling='div'):
        # Text of the element right after the <p> whose text contains `label`
        # inside the metadata box, or "" when the field is absent.
        node = res.xpath(f'//div[contains(@class, "m-detailinfo")]//p[contains(string(),"{label}")]')
        value = node.xpath(f'./following-sibling::{sibling}[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    # Title: try the three known page layouts, then fall back to the list entry.
    title = ''.join(res.xpath('//h1[@class="u-dttit"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="newstitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="wztit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文号")
    index_no = _meta("索引")
    subject = _meta("内容分类")
    # NOTE(review): the original looked for a sibling <td> here, unlike every
    # other field (which uses <div>); preserved as-is — confirm against the
    # live page markup.
    written_date = _meta("成文日期", sibling='td')
    legal_status = _meta("有效性")
    impl_date = _meta("生效时间")
    invalid_date = _meta("废止时间")
    # Full-width ideographic spaces separate keywords; normalize to ASCII space.
    keyword = _meta("关键词").replace("\u3000", " ").strip()
    organ = _meta("发布机")
    if organ.startswith('市'):
        organ = '马鞍山' + organ

    # Full text: try the modern layout first, then the two legacy layouts.
    fulltext = None
    for fulltext_xpath in ('//div[contains(@class, "m-detailtext")]',
                           '//div[contains(@class, "wzcon")]',
                           '//div[contains(@class, "conTx")]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("masarticle etl: fulltext container not found: " + provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99305'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MAS"
    zt_provider = "masgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (files linked inside the full-text container) is written
    # back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省淮北市
def policy_huaibeilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #1 for Huaibei (淮北市) policy pages.

    On the first page, schedules tasks for pages 2..total_page (count read
    from the inline "pageCount:N," script); then emits one article-level
    task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline script as "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="listnews"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            # Skip separator rows and non-article links.
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99306'
            article_json = {
                "url": url,
                "title": title,
                # e.g. "[2023-01-02]" -> "2023-01-02"
                "pub_date": li.css('span[class="right date"]::text').get()
                .replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huaibeilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #2 for Huaibei (淮北市) policy pages.

    Handles the "zc-list" layout where each policy entry is a whole
    <ul class="zc-list"> carrying title, url and labelled metadata spans
    (印发日期 / 文号 / 发布机构 / 状态).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline script as "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        # NOTE: one policy per <ul class="zc-list"> (not per <li>).
        for entry in res.css('ul[class="zc-list"]'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = entry.css('div[class="zc-title"] a::attr(href)').get()
            title = entry.css('div[class="zc-title"] a::text').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99306'
            # Metadata spans carry "label：value" text; match on the label.
            pub_date = pub_no = organ = legal_status = ""
            for span_text in entry.css('div[class="zc-detail"] span::text').getall():
                if "印发日期" in span_text:
                    pub_date = span_text.replace("印发日期：", "").strip()
                elif "文号" in span_text:
                    pub_no = span_text.replace("文号：", "").strip()
                elif "发布机构" in span_text:
                    organ = span_text.replace("发布机构：", "").strip()
                elif "状态：" in span_text:
                    legal_status = span_text.replace("状态：", "").strip()
            article_json = {
                "url": url,
                "title": title.strip(),
                "pub_date": pub_date,
                "pub_no": pub_no,
                "organ": organ,
                "legal_status": legal_status,
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huaibeilist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #3 for Huaibei (淮北市) policy pages.

    Handles the "xxgk_nav_con" layout; otherwise identical scheduling to
    the other Huaibei list callbacks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline script as "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="xxgk_nav_con"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::text').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99306'
            article_json = {
                "url": url,
                "title": title,
                "pub_date": li.css('span[class="date"]::text').get().replace('发布日期：', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huaibeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huaibei policy pages.

    No follow-up tasks are scheduled at this stage; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_huaibeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huaibei (淮北市) policy article pages.

    Extracts title, metadata (pub_no, index_no, subject, keyword, organ)
    from the "table_suoyin" metadata table and the full text, builds the
    `policy_latest` / `policy_fulltext_latest` rows, and stores attachment
    info back onto the task row via an update.

    Raises:
        Exception: if none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta(label):
        # Text of the <td> right after the <th> whose text contains `label`
        # inside the metadata table, or "" when the field is absent.
        th = res.xpath(f'//div[contains(@class, "table_suoyin")]//th[contains(string(),"{label}")]')
        value = th.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    # Title from the page header, falling back to the list entry.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文号")
    index_no = _meta("索引")
    subject = _meta("内容分类")
    keyword = _meta("关键词")
    organ = _meta("发布机")
    if organ.startswith('市'):
        organ = '淮北' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[contains(@class,"newscontnet")]|//div[@class="gzk-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("huaibeiarticle etl: fulltext container not found: " + provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99306'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUAIBEI"
    zt_provider = "huaibeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (files linked inside the full-text container) is written
    # back onto the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省安庆市
def policy_anqinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #1 for Anqing (安庆市) policy pages.

    On the first page, schedules tasks for pages 2..total_page (count read
    from the inline "pageCount:N," script); then emits one article-level
    task per list entry in the "navjz" layout.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count is embedded in an inline script as "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[class="navjz"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99308'
            article_json = {
                "url": url,
                "title": title,
                # e.g. "[2023-01-02]" -> "2023-01-02"
                "pub_date": li.css('span[class="right date"]::text').get()
                .replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_anqinglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #2 for Anqing (安庆市) policy pages.

    The page count is read from the "最后一页" (last page) pagination link's
    "&page=" parameter. On the first page, schedules tasks for the remaining
    pages; then emits one article-level task per "ul.list li" entry.

    Fix: previously an AttributeError was raised when the pagination link was
    missing (single-page lists); now defaults to one page, matching
    policy_anqinglist3_callback.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        last_page_href = res.xpath('//div[@class="pagination"]/a[contains(text(), "最后一页")]/@href').extract_first()
        if last_page_href:
            total_page = int(last_page_href.split("&page=")[-1])
        else:
            # No "last page" link: the list fits on a single page.
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('ul[class="list"] li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::text').get()
            if url is None:
                continue
            # Article ids are carried in the "?id=" query parameter.
            temp["rawid"] = url.split('?id=')[-1]
            temp["sub_db_id"] = '99308'
            article_json = {
                "url": "http://aqxxgk.anqing.gov.cn/" + url,
                "title": title.strip(),
                "pub_date": li.css('span::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_anqinglist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback #3 for Anqing (安庆市) policy pages.

    Same layout and pagination scheme as policy_anqinglist2_callback: page
    count from the "最后一页" link's "&page=" parameter, one article-level
    task per "ul.list li" entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        last_page_href = res.xpath(
            '//div[@class="pagination"]/a[contains(text(), "最后一页")]/@href').extract_first()
        if last_page_href:
            total_page = int(last_page_href.split("&page=")[-1])
        else:
            # No "last page" link: the list fits on a single page.
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue one task per remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('ul[class="list"] li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::text').get()
            if url is None:
                continue
            # Article ids are carried in the "?id=" query parameter.
            temp["rawid"] = url.split('?id=')[-1]
            temp["sub_db_id"] = '99308'
            article_json = {
                "url": "http://aqxxgk.anqing.gov.cn/" + url,
                "title": title.strip(),
                "pub_date": li.css('span::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_anqingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Anqing policy pages.

    No follow-up tasks are scheduled at this stage; parsing happens in the
    ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_anqingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Anqing (安庆市) policy article pages.

    Extracts the title, the metadata table (document number, index number,
    subject, keyword, legal status, issuing organ) and the full text from the
    fetched HTML, emits rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes any attachment info back onto the
    task row.

    Raises ``Exception`` when no full-text container can be located.
    """

    def _page_tab_value(sel, *chars):
        # The metadata table headers use spaced-out Chinese labels (e.g.
        # "文  号"), so match a <th> containing every given character and
        # read the adjacent <td>. Returns "" when the row is absent.
        cond = " and ".join('contains(text(),"{}")'.format(c) for c in chars)
        th = sel.xpath('//table[contains(@class, "page_tab")]//th[{}]'.format(cond))
        value = th.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try the two known page layouts, then fall back to the title
    # captured on the list page.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="word_t"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _page_tab_value(res, "文", "号")
    index_no = _page_tab_value(res, "索", "引")
    subject = _page_tab_value(res, "主", "类")
    legal_status = _page_tab_value(res, "有", "效")
    keyword = _page_tab_value(res, "关", "键")
    organ = _page_tab_value(res, "发", "布", "构")
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '安庆' + organ

    # Full text: primary layout first, alternate layout as fallback.
    fulltext_xpath = '//div[@class="word_c"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="J_content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(
            "fulltext not found for rawid {}".format(callmodel.sql_model.rawid))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99308'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "ANQING", "anqinggovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment links found inside the full-text node on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省黄山市
# 安徽省黄山市
def policy_huangshanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshan (黄山市) policy index pages ("curList").

    On the first page, fans out tasks for the remaining list pages; for every
    article link found, queues a next-stage (article) task whose
    ``article_json`` carries the url, title and publication date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script: "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('div[id="curList"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99309'
            article_json = {
                "url": "https://www.huangshan.gov.cn" + url,
                "title": title,
                # Date text looks like "发布日期：[YYYY-MM-DD]"; strip label and brackets.
                "pub_date": li.css('span[class="right date"]::text').get()
                    .replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshan policy-document table pages ("zcwjbox").

    On the first page, fans out tasks for the remaining list pages; for every
    table row, queues a next-stage (article) task whose ``article_json``
    carries the url, title and publication date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script: "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for row in res.css('div[class="zcwjbox"] tr[class="xxgk_nav_con"]'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = row.css('td[class="bt info"] a[class="title"]::attr(href)').get()
            title = row.css('td[class="bt info"] a[class="title"]::attr(title)').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99309'
            article_json = {
                "url": url,
                "title": title.strip(),
                "pub_date": row.css('td[class="fbrq"]::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshanlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshan "xxgk_nav_list" index pages.

    On the first page, fans out tasks for the remaining list pages; for every
    article link, queues a next-stage (article) task whose ``article_json``
    carries the url, title and publication date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script: "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('ul[class="clearfix xxgk_nav_list"] li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::attr(title)').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99309'
            article_json = {
                "url": url,
                "title": title.strip(),
                "pub_date": li.css('span[class="date"]::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshanlist4_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshan "xxgk_nav_list" index pages.

    NOTE(review): this is functionally identical to
    ``policy_huangshanlist3_callback``; the duplication is kept because each
    task tag is wired to its own callback name.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script: "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for li in res.css('ul[class="clearfix xxgk_nav_list"] li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::attr(title)').get()
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            temp["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
            temp["sub_db_id"] = '99309'
            article_json = {
                "url": url,
                "title": title.strip(),
                "pub_date": li.css('span[class="date"]::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshanlist5_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Huangshan "xxgk_navli" index pages.

    Articles here are addressed by a ``?id=`` query parameter (used as the
    rawid) instead of an .html path.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in an inline script: "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for row in res.css('div[class="xxgk_navli"] ul'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = row.css('li[class="mc"] a::attr(href)').get()
            title = row.css('li[class="mc"] a::text').get()
            if url is None:
                continue
            # rawid is the "?id=..." query value on this layout.
            temp["rawid"] = url.split('?id=')[-1]
            temp["sub_db_id"] = '99309'
            article_json = {
                "url": "https://www.huangshan.gov.cn" + url,
                "title": title.strip(),
                "pub_date": row.css('li[class="rq"]::text').get(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huangshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huangshan; all parsing happens in the ETL step."""
    return DealModel()


def policy_huangshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huangshan (黄山市) policy article pages.

    Extracts the title, the "table_suoyin" metadata block (document number,
    index number, subject, legal status, effective/repeal dates, keyword,
    issuing organ) and the full text from the fetched HTML, emits rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back onto the task row.

    Raises ``Exception`` when no full-text container can be located.
    """

    def _suoyin_value(sel, label):
        # Metadata lives in a "table_suoyin" block: find the <th> whose text
        # contains *label* and read the adjacent <td>; "" when missing.
        th = sel.xpath(
            '//div[contains(@class, "table_suoyin")]//th[contains(string(),"{}")]'.format(label))
        value = th.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try the three known page layouts, then fall back to the title
    # captured on the list page.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="word_t"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="wztit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _suoyin_value(res, "文号")
    index_no = _suoyin_value(res, "索引号")
    subject = _suoyin_value(res, "主题分类")
    legal_status = _suoyin_value(res, "有效性")
    impl_date = _suoyin_value(res, "生效日期")
    invalid_date = _suoyin_value(res, "废止日期")
    keyword = _suoyin_value(res, "关键词")
    organ = _suoyin_value(res, "发布机构")
    if not organ:
        # Fallback layout: "发布机构：..." inline in the byline bar.
        organ_info = ''.join(
            res.xpath('//div[@class="wzfbxx_left fl"]/span[@class="aut"]//text()').extract()).strip()
        organ = organ_info.replace("发布机构：", "").strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '黄山' + organ

    # Full text: primary layout first, alternate layout as fallback.
    fulltext_xpath = '//div[@class="wzcon j-fontContent clearfix"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="gkwz_contnet clearfix"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(
            "fulltext not found for rawid {}".format(callmodel.sql_model.rawid))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99309'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "HUANGSHAN", "huangshangovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment links found inside the full-text node on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省阜阳市
def policy_fylist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath(
            '//div[@id="page-list"]/a/span[contains(text(), "尾页")]/ancestor::a/@href').extract_first()
        page_info_before_list = page_info_before.split("page-")
        page_info = page_info_before_list[-1].replace("/", "")
        # page_info = re.findall("pageCount:(\d+),", para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.css('div[class="list-right"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            rawid = rawid_list[-1].replace(".html", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99310'
            article_json["url"] = "https://www.fy.gov.cn" + url
            article_json["title"] = title
            article_json["pub_date"] = li.css('span::text').get().replace('发布日期：', '').replace('[',
                                                                                               '').replace(
                ']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fylist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fuyang (www.fy.gov.cn), list style 2 ("pub" list).

    On page 1, fans out insert rows for the remaining pagination pages
    (page count read from the <pagination> tag's @pagecount attribute).
    For every qualifying <li>, queues one article row for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result
    html = para_dicts["data"]["1_1"]['html']
    selector = Selector(text=html)
    page_count = selector.xpath(
        '//section[@id="pagination"]/pagination/@pagecount').extract_first()
    total_page = int(page_count) if page_count else 1
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Only the first page schedules the rest of the pagination.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        json.loads(callmodel.sql_model.list_json)  # parity: raises if stored list_json is invalid
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_no}"}, ensure_ascii=False)
            page_model.lists.append(row.copy())
        result.befor_dicts.insert.append(page_model)
    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for li in selector.css('ul[class="pub"] li'):
        url = li.css('a:first-of-type::attr(href)').get()
        if url is None or 'htm' not in url:
            continue
        entry = base_info.copy()
        entry["task_tag"] = entry.pop("task_tag_next")
        entry["rawid"] = url.split('/')[-1].replace(".html", "")
        entry["sub_db_id"] = '99310'
        article_json = {
            "url": "https://www.fy.gov.cn" + url,
            "title": li.css('a:first-of-type::text').get().strip(),
            "pub_date": li.css('span::text').get(),
        }
        entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(entry)
    result.next_dicts.insert.append(article_model)

    return result


def policy_fyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Fuyang: nothing extra to schedule here;
    page parsing is handled by policy_fyarticle_etl_callback."""
    return DealModel()


def _policy_fy_meta_text(res, label_xpath):
    """Return the stripped text of the <td> immediately following the label
    cell matched by *label_xpath*, or '' when the label or value is absent."""
    value = res.xpath(label_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
    return value.strip() if value is not None else ""


def policy_fyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fuyang (www.fy.gov.cn) policy article pages.

    Parses the title, the metadata table (pub_no, index_no, written_date,
    legal_status, keyword, organ) and the full text out of the fetched HTML,
    builds the `policy_latest` / `policy_fulltext_latest` rows, and writes
    attachment info back onto the crawl row via `other_dicts`.

    Raises:
        Exception: when no full-text container can be located, so the
        platform marks this task as failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: dedicated heading first, any <h1> second, list-page title last.
    title = ''.join(res.xpath('//h1[@id="downloads_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: labels are matched character-by-character because the
    # site may insert whitespace between the label characters.
    pub_no = _policy_fy_meta_text(
        res, '//div[contains(@class, "m-syh")]//td[contains(text(),"文") and contains(text(),"号")]')
    index_no = _policy_fy_meta_text(
        res, '//div[contains(@class, "m-syh")]//td[contains(text(),"索") and contains(text(),"引")]')
    written_date = _policy_fy_meta_text(
        res,
        '//div[contains(@class, "m-syh")]//td[contains(text(),"成") and contains(text(),"文") and contains(text(),"期")]')
    legal_status = _policy_fy_meta_text(
        res,
        '//div[contains(@class, "m-syh")]//td[contains(text(),"有") and contains(text(),"效") and contains(text(),"性")]')
    keyword = _policy_fy_meta_text(
        res, '//div[contains(@class, "m-syh")]//td[contains(text(),"关") and contains(text(),"键")]')
    organ = _policy_fy_meta_text(
        res,
        '//div[contains(@class, "m-syh")]//td[contains(text(),"发") and contains(text(),"布") and contains(text(),"构")]')
    if organ.startswith('市'):
        # The site abbreviates the issuing organ as "市..."; prefix the city name.
        organ = '阜阳' + organ

    # Full text: main article container, falling back to the legacy "#zoom" div.
    # (Previously the page was re-queried unconditionally even when the primary
    # xpath already matched; now the fallback query runs only when needed.)
    fulltext_xpath = '//div[@class="m-zw j-fontContent row"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99310'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FY"
    zt_provider = "fygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may live in the body or in the separate download list;
    # `fulltext_xpath` here is whichever container was actually used above.
    attach_xpath = '//div[@class="is-downlist"]'
    file_info = get_file_info(data, res, f'({fulltext_xpath})') \
        + get_file_info(data, res, f'({attach_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省宿州市 (Suzhou City, Anhui Province)
def policy_ahszlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Suzhou (Anhui), list style 1 ("doc_list").

    On page 1, fans out insert rows for the remaining pagination pages
    (page count scraped from an inline "pageCount:N," script fragment).
    For every qualifying <li>, queues one article row for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result
    html = para_dicts["data"]["1_1"]['html']
    matches = re.findall(r"pageCount:(\d+),", html)
    total_page = int(matches[0]) if matches else 1
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Only the first page schedules the rest of the pagination.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        json.loads(callmodel.sql_model.list_json)  # parity: raises if stored list_json is invalid
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_no}"}, ensure_ascii=False)
            page_model.lists.append(row.copy())
        result.befor_dicts.insert.append(page_model)
    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for li in Selector(text=html).css('ul[class~="doc_list"] li'):
        url = li.css('a::attr(href)').get()
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        entry = base_info.copy()
        entry["task_tag"] = entry.pop("task_tag_next")
        entry["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
        entry["sub_db_id"] = '99311'
        raw_date = li.css('span[class="right date"]::text').get()
        article_json = {
            "url": url,
            "title": li.css('a::attr(title)').get(),
            "pub_date": raw_date.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
        }
        entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(entry)
    result.next_dicts.insert.append(article_model)

    return result


def policy_ahszlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Suzhou (Anhui), list style 2 ("zcwj_list").

    On page 1, fans out insert rows for the remaining pagination pages
    (page count scraped from an inline "pageCount:N," script fragment).
    For every qualifying <li>, queues one article row for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result
    html = para_dicts["data"]["1_1"]['html']
    matches = re.findall(r"pageCount:(\d+),", html)
    total_page = int(matches[0]) if matches else 1
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Only the first page schedules the rest of the pagination.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        json.loads(callmodel.sql_model.list_json)  # parity: raises if stored list_json is invalid
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_no}"}, ensure_ascii=False)
            page_model.lists.append(row.copy())
        result.befor_dicts.insert.append(page_model)
    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for li in Selector(text=html).css('ul[class="zcwj_list"] li'):
        url = li.css('a[class="bt"]::attr(href)').get()
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        entry = base_info.copy()
        entry["task_tag"] = entry.pop("task_tag_next")
        entry["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
        entry["sub_db_id"] = '99311'
        article_json = {
            "url": url,
            "title": li.css('a[class="bt"]::attr(title)').get().strip(),
            "pub_date": li.css('span[class="date"]::text').get(),
        }
        entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(entry)
    result.next_dicts.insert.append(article_model)

    return result


def policy_ahszlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Suzhou (Anhui), list style 3 ("xxgk_nav_con").

    On page 1, fans out insert rows for the remaining pagination pages
    (page count scraped from an inline "pageCount:(N +" script fragment).
    For every qualifying <li>, queues one article row for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result
    html = para_dicts["data"]["1_1"]['html']
    matches = re.findall(r"pageCount:\((\d+) \+", html)
    total_page = int(matches[0]) if matches else 1
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Only the first page schedules the rest of the pagination.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        json.loads(callmodel.sql_model.list_json)  # parity: raises if stored list_json is invalid
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_no}"}, ensure_ascii=False)
            page_model.lists.append(row.copy())
        result.befor_dicts.insert.append(page_model)
    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for li in Selector(text=html).css('div[class="xxgk_nav_con"] ul li'):
        url = li.css('a[class="title"]::attr(href)').get()
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        entry = base_info.copy()
        entry["task_tag"] = entry.pop("task_tag_next")
        entry["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
        entry["sub_db_id"] = '99311'
        article_json = {
            "url": url,
            "title": li.css('a[class="title"]::attr(title)').get().strip(),
            "pub_date": li.css('span[class="date"]::text').get(),
        }
        entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(entry)
    result.next_dicts.insert.append(article_model)

    return result


def policy_ahszarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Suzhou (Anhui): nothing extra to schedule;
    page parsing is handled by policy_ahszarticle_etl_callback."""
    return DealModel()


def _policy_ahsz_meta_text(res, label_xpath):
    """Return the stripped text of the <td> immediately following the label
    <th> matched by *label_xpath*, or '' when the label or value is absent."""
    value = res.xpath(label_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
    return value.strip() if value is not None else ""


def policy_ahszarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Suzhou (Anhui) policy article pages.

    Parses the title, the "table_suoyin" metadata table (pub_no, index_no,
    subject, written_date, legal_status, invalid_date, keyword, organ) and
    the full text from the fetched HTML, builds the `policy_latest` /
    `policy_fulltext_latest` rows, and writes attachment info back onto the
    crawl row via `other_dicts`.

    Raises:
        Exception: when no full-text container can be located, so the
        platform marks this task as failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading first, list-page title as the fallback.
    title = ''.join(res.xpath('//h1[@id="newstitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each field is the <td> next to its labelled <th>.
    pub_no = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"文号")]')
    index_no = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"索引号")]')
    subject = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"内容分类")]')
    written_date = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"成文日期")]')
    legal_status = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"有效性")]')
    invalid_date = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"废止时间")]')
    keyword = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"关键词")]')
    organ = _policy_ahsz_meta_text(
        res, '//div[contains(@class, "table_suoyin")]//th[contains(string(),"发布机构")]')
    if organ.startswith('市'):
        # The site abbreviates the issuing organ as "市..."; prefix the city name.
        organ = '宿州' + organ

    # Full text: exact-class container first, then any "j-fontContent" div.
    # (Previously the page was re-queried unconditionally even when the primary
    # xpath already matched; now the fallback query runs only when needed.)
    fulltext_xpath = '//div[@class="j-fontContent clearfix"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99311'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AHSZ"
    zt_provider = "ahszgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments are only collected from the full-text container actually used.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省滁州市 (Chuzhou City, Anhui Province)
def policy_chuzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chuzhou (Anhui), list style 1 ("listnews").

    On page 1, fans out insert rows for the remaining pagination pages
    (page count scraped from an inline "pageCount:N," script fragment).
    For every qualifying <li>, queues one article row for the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result
    html = para_dicts["data"]["1_1"]['html']
    matches = re.findall(r"pageCount:(\d+),", html)
    total_page = int(matches[0]) if matches else 1
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        # Only the first page schedules the rest of the pagination.
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_model = DealInsertModel()
        page_model.insert_pre = CoreSqlValue.insert_ig_it
        json.loads(callmodel.sql_model.list_json)  # parity: raises if stored list_json is invalid
        for page_no in range(current_page + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_no}"}, ensure_ascii=False)
            page_model.lists.append(row.copy())
        result.befor_dicts.insert.append(page_model)
    article_model = DealInsertModel()
    article_model.insert_pre = CoreSqlValue.insert_ig_it
    for li in Selector(text=html).css('div[class="listnews"] ul li'):
        url = li.css('a::attr(href)').get()
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        entry = base_info.copy()
        entry["task_tag"] = entry.pop("task_tag_next")
        entry["rawid"] = "{}_{}".format(parts[-2], parts[-1].replace(".html", ""))
        entry["sub_db_id"] = '99312'
        raw_date = li.css('span[class="date"]::text').get()
        article_json = {
            "url": url,
            "title": li.css('a::attr(title)').get(),
            "pub_date": raw_date.replace('发布日期：', '').replace('[', '').replace(']', '').strip(),
        }
        entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
        article_model.lists.append(entry)
    result.next_dicts.insert.append(article_model)

    return result


def policy_chuzhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chuzhou (滁州市) policy pages, layout 2.

    Reads the fetched HTML from ``para_dicts["data"]["1_1"]``. On the first
    list page it fans out tasks for the remaining pages; for every article
    link it queues a next-stage (article) task carrying url/title/pub_date.

    Returns:
        DealModel with the page fan-out inserts (``befor_dicts``) and the
        article-stage inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The total page count is embedded in an inline JS snippet "pageCount:N,".
        page_info = re.findall(r"pageCount:(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest, so re-crawled later
            # pages do not duplicate the fan-out.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.css('div[class="xxgk_nav_con"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()

            url = li.css('a[class="title"]::attr(href)').get()
            title = li.css('a[class="title"]::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99312'
            article_json["url"] = url
            # The title attribute can be absent; guard before strip().
            article_json["title"] = title.strip() if title else ""
            article_json["pub_date"] = li.css('span[class="date"]::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chuzhoulist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chuzhou policy pages, layout 3 (no pagination).

    Parses the single fetched list page and queues one article-stage task
    per article link found in the ``doc_list`` <ul>.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.css('ul[class~="doc_list"] li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            # rawid = "<parent dir>_<file name without .html>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99312'
            article_json["url"] = url
            # The title attribute can be absent; guard before strip().
            article_json["title"] = title.strip() if title else ""
            article_json["pub_date"] = li.css('span[class="date"]::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chuzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Chuzhou: no follow-up tasks, so return an empty DealModel."""
    return DealModel()


def _chuzhou_meta_field(res, th_xpath):
    """Return the stripped text of the <td> following the <th> matched by *th_xpath*, or ''."""
    value = res.xpath(th_xpath).xpath('./following-sibling::td[1]/text()').extract_first()
    return value.strip() if value is not None else ""


def policy_chuzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chuzhou policy article pages.

    Extracts the title, document metadata (from the "table_suoyin"
    <th>label</th><td>value</td> table) and the full text, builds rows for
    the ``policy_latest`` and ``policy_fulltext_latest`` tables, and writes
    any attachment info back onto the source row via an update.

    Raises:
        ValueError: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@id="newstitle"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = _chuzhou_meta_field(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(text(),"文") and contains(text(),"号")]')
    index_no = _chuzhou_meta_field(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(string(),"索引号")]')
    written_date = _chuzhou_meta_field(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(string(),"成文日期")]')
    legal_status = _chuzhou_meta_field(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(string(),"有效期")]')
    organ = _chuzhou_meta_field(
        res, '//table[contains(@class, "table_suoyin")]//th[contains(string(),"发布机构")]')
    if organ.startswith('市'):
        # The site abbreviates the city; prefix "滁州" to disambiguate the organ.
        organ = '滁州' + organ

    fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # ValueError is a subclass of Exception, so existing handlers still match.
        raise ValueError(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99312'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHUZHOU"
    zt_provider = "chuzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  安徽省六安市
def policy_luanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Lu'an (六安市) policy pages.

    On the first page, fans out tasks for the remaining list pages; for
    each article row, resolves the link, derives a rawid from the URL and
    queues the article-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # The total page count is embedded in inline JS: "pageCount:N".
            max_count = re.findall(r'pageCount:(\d+)', para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="doc_list list-6790131"]/li|//body/li|//table/tbody/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|td[4]/a/@href|a/@href').extract_first()
            if not href:
                continue
            # Resolve relative links against the section the row came from.
            base_url = f'https://www.luan.gov.cn/{callmodel.sql_model.list_rawid}'
            url = parse.urljoin(base_url, href)
            if 'luan' not in url:
                continue
            if 'id=' in url:
                rawid = re.findall(r'id=(.*)', url)[0]
            else:
                if '.' not in url.split('/')[-1]:
                    continue
                rawid = re.findall(r'(.*)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99313'
            article_json["url"] = url
            # Title/date cells may be missing for some rows; guard before strip().
            title = li.xpath('div/a/@title|td[4]/a/@title|a/@title').extract_first()
            article_json["title"] = title.strip() if title else ""
            pub_date = li.xpath('span/text()|td[3]/text()').extract_first()
            article_json["pub_date"] = pub_date.strip() if pub_date else ""
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_luanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Lu'an: no follow-up tasks, so return an empty DealModel."""
    return DealModel()


def _luan_meta_field(res, th_pred):
    """Join and strip the text of the <td> following the <th> matching the XPath predicate *th_pred*."""
    xpath = f'//table[contains(@class,"hidden-sm")]//th[{th_pred}]/following::td[1]/text()'
    return ''.join(res.xpath(xpath).extract()).strip()


def policy_luanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Lu'an policy article pages.

    Extracts document metadata from the "hidden-sm" <th>/<td> table and
    the full text, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info back onto the
    source row.

    Raises:
        ValueError: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = _luan_meta_field(res, 'contains(text(),"文") and contains(text(),"号")')
    index_no = _luan_meta_field(res, 'text()="索"')
    subject = _luan_meta_field(res, 'contains(text(),"主题分类")')
    subject_word = _luan_meta_field(res, 'text()="关"')
    organ = _luan_meta_field(res, 'contains(text(),"发布机构")')
    legal_status = _luan_meta_field(res, 'text()="有"')
    written_date = _luan_meta_field(res, 'contains(text(),"成文日期")')
    if organ.startswith('市'):
        # The site abbreviates the city; prefix "六安" to disambiguate the organ.
        organ = '六安' + organ

    fulltext_xpath = '//div[contains(@class,"j-fontContent")]|//div[@class="gzk-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # ValueError is a subclass of Exception, so existing handlers still match.
        raise ValueError(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99313'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LUAN"
    zt_provider = "luangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result



# 安徽省宣城市
def policy_xuanchenglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xuancheng (宣城市) policy pages, layout 1.

    On the first page, fans out tasks for the remaining list pages; for
    each article link in the ``m-tglist`` section, queues an article-stage
    task with the absolute URL, title and cleaned publish date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The page count is carried on a <pagination pagecount="N"> element.
        page_info = res.xpath(
            '//section[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest of the list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.css('section[class~="m-tglist"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            # rawid = file name without its .html/.htm extension.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99314'
            article_json["url"] = "https://www.xuancheng.gov.cn" + url
            article_json["title"] = title
            # The date <span> may be missing; guard before the replace() chain.
            pub_date_raw = li.css('span::text').get() or ''
            article_json["pub_date"] = pub_date_raw.replace(
                '发布日期：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xuanchenglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xuancheng policy pages, layout 2.

    Same flow as layout 1 but the pagination element sits under a <div>
    and article rows live in the ``m-cglists`` container.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The page count is carried on a <pagination pagecount="N"> element.
        page_info = res.xpath(
            '//div[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest of the list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.css('div[class~="m-cglists"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()

            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            # rawid = file name without its .html/.htm extension.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99314'
            article_json["url"] = "https://www.xuancheng.gov.cn" + url
            # The title attribute can be absent; guard before strip().
            article_json["title"] = title.strip() if title else ""
            article_json["pub_date"] = li.css('span::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xuanchengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xuancheng: no follow-up tasks, so return an empty DealModel."""
    return DealModel()


def _xuancheng_meta_field(res, label):
    """Return the stripped value <td> following the label <td> containing *label*
    inside the "m-detailtb" metadata table, or '' when absent."""
    before = res.xpath(f'//table[contains(@class, "m-detailtb")]//td[contains(string(),"{label}")]')
    value = before.xpath('./following-sibling::td[1]/text()').extract_first()
    return value.strip() if value is not None else ""


def policy_xuanchengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xuancheng policy article pages.

    Resolves the title through several known page layouts, extracts
    document metadata from the "m-detailtb" table and the full text from
    ``div#zoom``, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info back onto the
    source row.

    Raises:
        ValueError: if the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # The site uses several article templates; try each known title node.
    title = ''.join(res.xpath('//h1[@id="newstitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="text-center u-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@id="title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _xuancheng_meta_field(res, "文号")
    index_no = _xuancheng_meta_field(res, "索引号")
    subject = _xuancheng_meta_field(res, "主题分类")
    organ = _xuancheng_meta_field(res, "发布机构")
    if organ.startswith('市'):
        # The site abbreviates the city; prefix "宣城" to disambiguate the organ.
        organ = '宣城' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # ValueError is a subclass of Exception, so existing handlers still match.
        raise ValueError(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99314'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XUANCHENG"
    zt_provider = "xuanchenggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Collect attachment links found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省池州市
def policy_chizhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chizhou (池州市) policy pages, layout 1.

    On the first page, fans out tasks for the remaining list pages; for
    each article link in the ``m-cglists`` container, queues an
    article-stage task with the absolute URL, title and cleaned date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The page count is carried on a <pagination pagecount="N"> element.
        page_info = res.xpath(
            '//div[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the rest of the list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.css('div[class~="m-cglists"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            # rawid = file name without its .html/.htm extension.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99315'
            article_json["url"] = "https://www.chizhou.gov.cn" + url
            article_json["title"] = title
            # The date <span> may be missing; guard before the replace() chain.
            pub_date_raw = li.css('span::text').get() or ''
            article_json["pub_date"] = pub_date_raw.replace(
                '发布日期：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chizhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Chizhou gov "m-tablelist" list page.

    On page 1, schedules the remaining list pages (2..total_page) as
    before-tasks; for every table row, emits a next-stage article task
    carrying the detail URL, title and publish date in ``article_json``.

    :param callmodel: crawl context with the fetched html and the task row.
    :return: DealModel with paging tasks in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count is exposed as @pagecount on the <pagination> tag.
        page_info = res.xpath(
            '//section[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out follow-up pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('section[class~="m-tablelist"] tr'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()

            url = li.css('td[class="u-info"] a::attr(href)').get()
            title = li.css('td[class="u-info"] a::attr(title)').get()
            # Skip header rows / non-document links.
            if url is None or 'htm' not in url:
                continue
            # rawid is the document file name without its .htm(l) suffix.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99315'
            article_json["url"] = "https://www.chizhou.gov.cn" + url
            article_json["title"] = title.strip()
            article_json["pub_date"] = li.css('td:last-of-type::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chizhoulist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Chizhou gov "m-tglist" list page.

    On page 1, schedules the remaining list pages (2..total_page) as
    before-tasks; for every <li>, emits a next-stage article task carrying
    the detail URL, title and publish date in ``article_json``.

    :param callmodel: crawl context with the fetched html and the task row.
    :return: DealModel with paging tasks in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count is exposed as @pagecount on the <pagination> tag.
        page_info = res.xpath(
            '//section[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out follow-up pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('section[class~="m-tglist"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()

            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::attr(title)').get()
            # Skip rows without a document link.
            if url is None or 'htm' not in url:
                continue
            # rawid is the document file name without its .htm(l) suffix.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99315'
            article_json["url"] = "https://www.chizhou.gov.cn" + url
            article_json["title"] = title.strip()
            article_json["pub_date"] = li.css('span::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: parsing happens in the ETL callback, so this
    simply returns an empty DealModel."""
    return DealModel()


def policy_chizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Chizhou gov policy detail page.

    Extracts title, document metadata (pub_no, index_no, subject, dates,
    organ, legal_status) and the full text, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest``; attachment info is
    written back into the task row's ``other_dicts``.

    :param callmodel: crawl context with the fetched html and the task row.
    :raises Exception: when no known fulltext container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try the page variants first, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="m-detailtit text-center"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="text-center u-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if '\n' in title:
        # A multi-line heading is unreliable; prefer the meta tag instead.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta_field(label):
        """Return the stripped text of the <td> following the header cell
        whose text contains *label*; "" when absent. Checks both detail
        table layouts used by the site."""
        before = res.xpath(f'//table[contains(@class, "m-detailtb")]//td[contains(string(),"{label}")]')
        if not before:
            before = res.xpath(f'//div[contains(@class, "m-detailtable")]//td[contains(string(),"{label}")]')
        value = before.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    pub_no = _meta_field("文号")
    index_no = _meta_field("索引号")
    subject = _meta_field("主题分类")
    written_date = _meta_field("成文日期")
    legal_status = _meta_field("有效性")
    impl_date = _meta_field("生效日期")
    invalid_date = _meta_field("废止日期")
    organ = _meta_field("发布机构")
    if organ.startswith('市'):
        # Qualify "市..." organs with the city name.
        organ = '池州' + organ

    # Try the known fulltext containers in order of preference; keep the
    # matching xpath for attachment extraction below.
    fulltext = None
    for fulltext_xpath in ('//div[contains(@class, "g-detailbox")]',
                           '//div[contains(@class, "m-detailleft")]',
                           '//div[@id="zoom"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("chizhou policy ETL: fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99315'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHIZHOU"
    zt_provider = "chizhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may live in the body, the related-docs box or the right rail.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, '(//div[contains(@class, "m-relation")])')
                 + get_file_info(data, res, '(//div[contains(@class, "m-detailright")])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 安徽省亳州市 (Bozhou, Anhui Province)
def policy_bozhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Bozhou gov "m-listcg" list page.

    On page 1, schedules the remaining list pages (2..total_page) as
    before-tasks; for every <li>, emits a next-stage article task carrying
    the detail URL, title and publish date in ``article_json``.

    :param callmodel: crawl context with the fetched html and the task row.
    :return: DealModel with paging tasks in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count is exposed as @pagecount on the <pagination> tag.
        page_info = res.xpath(
            '//div[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out follow-up pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('div[class~="m-listcg"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::attr(title)').get()
            # Skip rows without a document link.
            if url is None or 'htm' not in url:
                continue
            # rawid is the document file name without its .htm(l) suffix.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99316'
            article_json["url"] = "https://www.bozhou.gov.cn" + url
            article_json["title"] = title
            # Strip the "发布日期：" prefix and surrounding brackets.
            article_json["pub_date"] = li.css('span::text').get().replace(
                '发布日期：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bozhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Bozhou gov "m-tglist" list page.

    On page 1, schedules the remaining list pages (2..total_page) as
    before-tasks; for every <li>, emits a next-stage article task carrying
    the detail URL, title and publish date in ``article_json``.

    :param callmodel: crawl context with the fetched html and the task row.
    :return: DealModel with paging tasks in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count is exposed as @pagecount on the <pagination> tag.
        page_info = res.xpath(
            '//section[@id="pagination"]/pagination/@pagecount').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out follow-up pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.css('section[class~="m-tglist"] ul li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()

            url = li.css('a:first-of-type::attr(href)').get()
            title = li.css('a:first-of-type::attr(title)').get()
            # Skip rows without a document link.
            if url is None or 'htm' not in url:
                continue
            # rawid is the document file name without its .htm(l) suffix.
            rawid = url.split('/')[-1].replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99316'
            article_json["url"] = "https://www.bozhou.gov.cn" + url
            article_json["title"] = title.strip()
            article_json["pub_date"] = li.css('span::text').get()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bozhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: parsing happens in the ETL callback, so this
    simply returns an empty DealModel."""
    return DealModel()


def policy_bozhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Bozhou gov policy detail page.

    Extracts title, document metadata (pub_no, index_no, subject, keyword,
    organ, legal_status) and the full text, producing rows for
    ``policy_latest`` / ``policy_fulltext_latest``; attachment info is
    written back into the task row's ``other_dicts``.

    :param callmodel: crawl context with the fetched html and the task row.
    :raises Exception: when no known fulltext container is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try the page variants first, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="text-center u-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if '相关新闻' in title:
        # A heading polluted by the "related news" box is unreliable;
        # prefer the meta tag instead.
        title = cleaned(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract_first())
    if not title:
        title = article_json['title'].strip()

    def _meta_field(label):
        """Return the stripped text of the <td> following the header cell
        whose text contains *label*; "" when absent."""
        before = res.xpath(f'//table[contains(@class, "m-detailtb")]//td[contains(string(),"{label}")]')
        value = before.xpath('./following-sibling::td[1]/text()').extract_first()
        return value.strip() if value is not None else ""

    pub_no = _meta_field("文号")
    index_no = _meta_field("索引号")
    subject = _meta_field("主题分类")
    legal_status = _meta_field("有效性")
    keyword = _meta_field("关键词")
    organ = _meta_field("发布机构")
    if organ.startswith('市'):
        # Qualify "市..." organs with the city name.
        organ = '亳州' + organ

    # Try the known fulltext containers in order of preference; keep the
    # matching xpath for attachment extraction below.
    fulltext = None
    for fulltext_xpath in ('//div[contains(@class, "g-detailbox")]',
                           '//div[@id="zoom"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("bozhou policy ETL: fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99316'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BOZHOU"
    zt_provider = "bozhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省发展和改革委员会 (Hainan Provincial Development and Reform Commission)
def policy_planhainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Hainan DRC (plan.hainan.gov.cn) list page.

    The page count is read from the inline ``createPageHTML('page_div', N, ...)``
    call. On page 1, schedules the remaining list pages as before-tasks with
    ``page_info`` extended to "<base>_<page>"; for every row, emits a
    next-stage article task carrying the detail URL, title and publish date.

    :param callmodel: crawl context with the fetched html and the task row.
    :return: DealModel with paging tasks in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: the pattern contains regex escapes (\( and \d).
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),",
                               para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out follow-up pages, so each page is
            # scheduled exactly once.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts are in use; try the newer one first.
        li_list = res.css('div[class~="cen-div-1"] div[class~="list_div"]')
        if not li_list:
            li_list = res.css('div[class="Fivelist"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::text').get()
            # Skip rows without a document link.
            if url is None or 'htm' not in url:
                continue
            # rawid is the document file name without its .s?htm(l) suffix.
            rawid = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99317'
            article_json["url"] = "http://plan.hainan.gov.cn" + url
            article_json["title"] = title
            # Publish date lives in a table cell on one layout, <em> on the other.
            pub_date = li.css('table td:first-of-type::text').get()
            if not pub_date:
                pub_date = li.css('em::text').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_planhainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage: parsing happens in the ETL callback, so this
    simply returns an empty DealModel."""
    return DealModel()


def policy_planhainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Hainan DRC policy detail page.

    Extracts title, document metadata from the "zwgk_comr1" header block and
    the <ucapcontent> full text, producing rows for ``policy_latest`` /
    ``policy_fulltext_latest``; attachment info is written back into the
    task row's ``other_dicts``.

    :param callmodel: crawl context with the fetched html and the task row.
    :raises Exception: when the <ucapcontent> fulltext element is absent.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: try the page variants first, fall back to the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if '\n' in title:
        # A multi-line heading is unreliable; prefer the meta tag instead.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _header_field(chars):
        """Return the stripped text of the header <span> whose <strong>
        label contains every character in *chars*; "" when absent.
        Characters are matched individually because the labels are padded
        with full-width spaces on the page."""
        cond = " and ".join(f'contains(text(),"{c}")' for c in chars)
        xpath = f'//div[contains(@class, "zwgk_comr1")]//strong[{cond}]/ancestor::span/text()'
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _header_field("文号")
    index_no = _header_field("索引")
    subject = _header_field("主题类")
    written_date = _header_field("成文日")
    legal_status = _header_field("时效性")
    organ = _header_field("发机关")
    if organ.startswith('省'):
        # Qualify "省..." organs with the province name.
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("planhainan policy ETL: fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99317'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PLANHAINAN"
    zt_provider = "planhainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省工业和信息化厅 (Hainan Provincial Department of Industry and Information Technology)
def policy_iitbhainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for iitb.hainan.gov.cn policy lists.

    On the first page (page_index == 1) the total page count is read from the
    ``createPageHTML('page_div', N, ...)`` call embedded in the HTML and the
    remaining list pages are enqueued; on every page the article rows are
    extracted and queued for the next task stage.

    :param callmodel: wrapper carrying the fetched HTML (``para_dicts``) and
        the originating task row (``sql_model``).
    :return: DealModel with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The page count is the second argument of createPageHTML('page_div', N, ...).
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.css('div[class~="cen-div-1"] div[class~="list_div"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::text').get()
            # Skip rows without a usable .htm/.html/.shtml article link.
            if url is None or 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid is the link's file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99318'
            article_json["url"] = "http://iitb.hainan.gov.cn" + url
            article_json["title"] = title
            pub_date = li.css('table td:first-of-type::text').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_iitbhainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback: intentionally a no-op — all parsing happens in the ETL stage."""
    return DealModel()


def policy_iitbhainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for iitb.hainan.gov.cn policy articles.

    Parses the article HTML into a ``policy_latest`` metadata row plus a
    ``policy_fulltext_latest`` row, and writes attachment info discovered in
    the fulltext back onto the source task row (``other_dicts``).

    :param callmodel: wrapper carrying the fetched HTML and the task row.
    :raises Exception: when the fulltext container cannot be located, so the
        task fails instead of persisting an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta_field(*chars):
        # Metadata lives in div.zwgk_comr1 as <span><strong>label</strong>value</span>;
        # match the label by its characters and return the whole span's text.
        cond = " and ".join(f'contains(text(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[contains(@class, "zwgk_comr1")]//strong[{cond}]/ancestor::span/text()'
        ).extract()).strip()

    # Title fallbacks: page <ucaptitle>, then <h1>, then the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta_field("文", "号")          # 发文字号
    index_no = _meta_field("索", "引")        # 索引号
    subject = _meta_field("主", "题", "类")   # 主题分类
    written_date = _meta_field("成", "文", "日")  # 成文日期
    legal_status = _meta_field("时", "效", "性")  # 时效性
    organ = _meta_field("发", "机", "关")     # 发文机关
    # Site abbreviates the province: "省..." means "海南省...".
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly (with context) rather than saving an empty record.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99318'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "IITBHAINAN"
    zt_provider = "iitbhainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked inside the fulltext) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省科学技术厅 (Hainan Department of Science and Technology)
def policy_dosthainanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for dost.hainan.gov.cn /xxgk/ policy lists.

    Page 0 reads the total page count from the ``createPageHTML(cur, count, ...)``
    call in the HTML and fans out the remaining list pages; every page extracts
    the article rows and queues them for the next task stage.

    :param callmodel: wrapper carrying the fetched HTML and the task row.
    :return: DealModel with follow-up list pages and queued article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is the second argument of createPageHTML(cur, count, ...);
        # the site numbers pages from 0, hence the "- 1".
        page_info = re.findall(r"createPageHTML\(\d+, (\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) - 1 if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.css('div[class="con-right"] div[class="list_div"]')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.css('a::attr(href)').get()
            title = li.css('a::text').get()
            # Skip rows without a usable .htm/.html/.shtml article link.
            if url is None or 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid is the link's file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99319'
            # Resolve relative links against the concrete list-page URL.
            url_before = f"http://dost.hainan.gov.cn/xxgk/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.html"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date = li.css('table td:first-of-type::text').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dosthainanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for dost.hainan.gov.cn xxgkml table-style lists.

    Page 0 derives the total page count from the embedded record counter
    (``m_nRecordCount = "N"``, 20 records per page) and fans out the remaining
    list pages; every page extracts the article rows from the results table
    and queues them for the next task stage.

    :param callmodel: wrapper carrying the fetched HTML and the task row.
    :return: DealModel with follow-up list pages and queued article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # 20 records per page; the site numbers pages from 0, hence the "- 1".
        page_info = re.findall(r"m_nRecordCount = \"(\d+)\"", para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(int(page_info[0]) / 20) - 1 if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="main-box02"]/table/tbody/tr')
        if not li_list:
            li_list = res.xpath('//table[@id="tablist"]/tbody/tr')
        # list_json is loop-invariant: parse it once, not per row.
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./td[@class="name"]//a/@data-tip_href').get()
            title = li.xpath('./td[@class="name"]//a/text()').get()
            if url is None:
                # Fall back to the plain href. NOTE(review): the 'htm' filter
                # below is deliberately NOT applied to this fallback branch,
                # matching the site's mixed link formats.
                url = li.xpath('./td[@class="name"]//a/@href').get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid is the link's file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99319'
            # Resolve relative links against the concrete list-page URL.
            url_before = f"http://dost.hainan.gov.cn/xxgk/xxgkzl/xxgkml/1893/{callmodel.sql_model.list_rawid}/{list_json['page_info']}.html"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title.strip()
            pub_date = li.xpath('./td[last()]/text()').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dosthainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback: intentionally a no-op — all parsing happens in the ETL stage."""
    return DealModel()


def policy_dosthainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for dost.hainan.gov.cn policy articles.

    Parses the article HTML into a ``policy_latest`` metadata row plus a
    ``policy_fulltext_latest`` row, and writes attachment info discovered in
    the fulltext back onto the source task row (``other_dicts``).

    :param callmodel: wrapper carrying the fetched HTML and the task row.
    :raises Exception: when the fulltext container cannot be located, so the
        task fails instead of persisting an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _meta_field(*chars):
        # Metadata lives in div.headInfo as <strong>label</strong><span>value</span>;
        # match the label by its characters and return the sibling span's text.
        cond = " and ".join(f'contains(text(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//div[contains(@class, "headInfo")]//strong[{cond}]/following-sibling::span/text()'
        ).extract()).strip()

    # Title fallbacks: page <ucaptitle>, then <h1>, then the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta_field("文", "号")          # 发文字号
    index_no = _meta_field("索", "引")        # 索引号
    subject = _meta_field("分", "类")         # 分类
    legal_status = _meta_field("时", "效", "性")  # 时效性
    keyword = _meta_field("主", "题", "词")   # 主题词
    organ = _meta_field("发", "机", "关")     # 发文机关
    # Site abbreviates the province: "省..." means "海南省...".
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly (with context) rather than saving an empty record.
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99319'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DOSTHAINAN"
    zt_provider = "dosthainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files linked inside the fulltext) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省教育厅 (Hainan Department of Education)
def policy_eduhainanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # page_info_before = res.xpath(
        #     '//div[@id="page_div"]/div//span/a[contains(text(), "末页")]/@href').extract_first()
        # page_info_before_list = page_info_before.split("_")
        # page_info = page_info_before_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        page_info = re.findall("createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info[0]) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@class="flfg_038-01"]/li')
        # if not li_list:
        #     li_list = res.css('div[class="Fivelist"] ul li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/text()').get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99320'
            article_json["url"] = "http://edu.hainan.gov.cn" + url
            article_json["title"] = title
            pub_date = li.xpath('./em/text()').get()
            # if not pub_date:
            #     pub_date = li.css('em::text').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_eduhainanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hainan Provincial Department of Education (layout 2, table rows).

    Same flow as :func:`policy_eduhainanlist1_callback` but the list items are
    ``tr`` rows under ``tbody#b``: on page 1 it schedules the remaining list
    pages (count taken from the ``createPageHTML('page_div', N, ...)``
    pagination script), then extracts each article's url/title/pub_date and
    schedules the article-level tasks.

    Returns:
        DealModel carrying the page-fanout and article insert models.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \( and \d are regex metacharacters, not string escapes.
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        # Default to a single page when the pagination script is absent.
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//tbody[@id="b"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./td[@class="line"]/a/@href').get()
            title = li.xpath('./td[@class="line"]/a/text()').get()
            # Skip rows without a usable detail-page link.
            if url is None or 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = last path segment without its .shtml/.html/.htm suffix.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99320'
            article_json["url"] = "http://edu.hainan.gov.cn" + url
            article_json["title"] = title
            pub_date = li.xpath('./td[last()]/text()').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_eduhainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Hainan Education Department: nothing
    to do at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_eduhainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan Education Department article pages.

    Parses the detail-page HTML: extracts the title, document metadata
    (pub_no, index_no, subject, written_date, legal_status, organ) from the
    ``div.xlym2-m`` header block, and the full text from ``<ucapcontent>``.
    Builds the ``policy_latest`` / ``policy_fulltext_latest`` rows and stores
    attachment info (from the fulltext area) back onto the source row via an
    ``other_dicts`` update.

    Raises:
        Exception: if no fulltext node is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer <ucaptitle>, then any <h1>, then the title captured on
    # the list page.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata spans under div.xlym2-m are located by the characters of
    # their <i> label (e.g. "文" + "号" -> 发文字号).
    pub_no = ''.join(
        res.xpath(
            '//div[contains(@class, "xlym2-m")]//i[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()
    # Normalize "省..." to the fully-qualified "海南省..." organ name.
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Treat a missing fulltext node as a hard failure so the task is
        # retried/flagged upstream; include the URL for diagnosis.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99320'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EDUHAINAN"
    zt_provider = "eduhainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the fulltext area go back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省民政厅
def policy_mzhainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hainan Provincial Department of Civil Affairs.

    On page 1, derives the total page count from the embedded
    ``createPageHTML('page_div', N, ...)`` pagination script and schedules the
    remaining list pages; then extracts each article's url/title/pub_date from
    the ``div.con-right.fr`` rows and schedules article-level tasks.

    Returns:
        DealModel carrying the page-fanout and article insert models.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \( and \d are regex metacharacters, not string escapes.
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        # Default to a single page when the pagination script is absent.
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="con-right fr"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('.//div[@class="list-right_title fon_1"]/a/@href').get()
            title = li.xpath('.//div[@class="list-right_title fon_1"]/a/text()').get()
            # Skip rows without a usable detail-page link.
            if url is None or 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = last path segment without its .shtml/.html/.htm suffix.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99321'
            article_json["url"] = "http://mz.hainan.gov.cn" + url
            article_json["title"] = title
            pub_date = li.xpath('./table//tr/td[1]/text()').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzhainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Hainan Civil Affairs Department: nothing
    to do at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_mzhainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan Civil Affairs Department article pages.

    Parses the detail-page HTML: extracts the title, document metadata
    (pub_no, index_no, subject, written_date, legal_status, organ) from the
    ``div.zwgk_comr1`` header block, and the full text from ``<ucapcontent>``.
    Builds the ``policy_latest`` / ``policy_fulltext_latest`` rows and stores
    attachment info (from the fulltext area) back onto the source row via an
    ``other_dicts`` update.

    Raises:
        Exception: if no fulltext node is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer <ucaptitle>, then any <h1>, then the title captured on
    # the list page.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata spans under div.zwgk_comr1 are located by the characters of
    # their <strong> label (e.g. "文" + "号" -> 发文字号).
    pub_no = ''.join(
        res.xpath(
            '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"主") and contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()
    # Normalize "省..." to the fully-qualified "海南省..." organ name.
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Treat a missing fulltext node as a hard failure so the task is
        # retried/flagged upstream; include the URL for diagnosis.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99321'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZHAINAN"
    zt_provider = "mzhainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links inside the fulltext area go back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省财政厅
def policy_mofhainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Hainan Provincial Department of Finance.

    On page 1, derives the total page count from the embedded
    ``createPageHTML('page_div', N, ...)`` pagination script and schedules the
    remaining list pages; then extracts each article's url/title/pub_date
    (two list layouts coexist, hence the ``|`` unions in the XPaths) and
    schedules the article-level tasks. Relative hrefs are resolved against
    the section index URL.

    Returns:
        DealModel carrying the page-fanout and article insert models.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: \( and \d are regex metacharacters, not string escapes.
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        # Default to a single page when the pagination script is absent.
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="flfg_038-01"]//li|//div[@class="tab-li"]')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="tab-li-t"]/div[@class="tab-li-t-m"]/a/@href|a/@href').extract_first()
            # Skip rows without a link instead of crashing on None.strip()
            # (consistent with the sibling list callbacks).
            if href is None:
                continue
            href = href.strip()
            title = li.xpath('div[@class="tab-li-t"]/div[@class="tab-li-t-m"]/a/text()|a/text()').extract_first().strip()
            base_url = f'http://mof.hainan.gov.cn/sczt/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = last path segment without its .shtml/.html/.htm suffix.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99322'
            article_json["url"] = url
            article_json["title"] = title
            pub_date = li.xpath('div[@class="tab-li-x"]/span[3]/text()|em/text()').extract_first().strip()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mofhainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mof.hainan.gov.cn: no extra scheduling needed."""
    return DealModel()


def policy_mofhainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan Department of Finance (mof.hainan.gov.cn) policy articles.

    Parses the fetched article HTML, extracts the title, document metadata
    (pub_no, index_no, subject, written_date, legal_status, organ) and the
    full text, builds the ``policy_latest`` / ``policy_fulltext_latest`` rows,
    and schedules an ``other_dicts`` update carrying attachment info.

    Raises:
        ValueError: when no fulltext node can be located in the page
            (was a bare ``raise Exception`` with no message).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    # Normalize the list-page date once; reused below without re-cleaning.
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallbacks: <ucaptitle> -> <h1> -> meta ArticleTitle (when the
    # visible title spans lines) -> the title captured on the list page.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()
    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//div[contains(@class, "xlym2-m")]//i[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()
    # Organ names beginning with 省 lack the province prefix on this site.
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise ValueError(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99322'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MOFHAINAN"
    zt_provider = "mofhainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date  # already normalized above; avoid double clean
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    # Always write other_dicts so reruns don't leave stale attachment info.
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省人力资源和社会保障厅
def policy_hrsshainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Hainan HR & Social Security dept (hrss.hainan.gov.cn).

    Reads the total page count from the page's inline ``createPageHTML``
    call; on page 1 it fans out fetch tasks for the remaining list pages,
    then on every page it emits article rows (absolute url, title,
    pub_date) for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count embedded as createPageHTML('page_div', <total>, ...).
        page_match = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_match[0]) if page_match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class, "cen-div-1")]/div/div[contains(@class, "list_div")]')
        for li in li_list:
            url = li.xpath('./div[contains(@class, "list-right_title")]/a/@href').get()
            title = li.xpath('./div[contains(@class, "list-right_title")]/a/text()').get()
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Next-stage rows carry the article task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid is the document filename without its html-ish extension.
            temp["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["sub_db_id"] = '99323'
            article_json = dict()
            article_json["url"] = "http://hrss.hainan.gov.cn" + url
            article_json["title"] = title
            # Some rows lack a date cell; default to "" instead of crashing.
            pub_date = li.xpath('./table//tr/td[1]/text()').get() or ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrsshainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for hrss.hainan.gov.cn: no extra scheduling needed."""
    return DealModel()


def policy_hrsshainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan HR & Social Security dept (hrss.hainan.gov.cn) articles.

    Parses the fetched article HTML, extracts the title, document metadata
    (pub_no, index_no, subject, written_date, legal_status, organ) and the
    full text, builds the ``policy_latest`` / ``policy_fulltext_latest`` rows,
    and schedules an ``other_dicts`` update carrying attachment info.

    Raises:
        ValueError: when no fulltext node can be located in the page
            (was a bare ``raise Exception`` with no message).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title fallbacks: <ucaptitle> -> <h1> -> title captured on the list page.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"主") and contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    # Legal status sits one level deeper (span/div/div) on this site.
    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"时") and contains(text(),"效")]/ancestor::span/div/div/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()

    # Organ names beginning with 省 lack the province prefix on this site.
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise ValueError(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99323'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSHAINAN"
    zt_provider = "hrsshainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    # Always write other_dicts so reruns don't leave stale attachment info.
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省农业农村厅
def policy_agrihainanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Hainan Dept. of Agriculture and Rural Affairs.

    Reads the total page count from the page's inline ``createPageHTML``
    call; on page 0 it fans out fetch tasks for the remaining list pages,
    then on every page it emits article rows (absolute url, title,
    pub_date) for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count embedded as createPageHTML(<current>, <total>, ...).
        page_match = re.findall(r"createPageHTML\(\d+, (\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_match[0]) if page_match else 1
        page_index = int(callmodel.sql_model.page_index)
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            # Only page 0 fans out; pages are zero-based, so the last page
            # index is total_page - 1 (hence no +1 on the range end).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class, "flfg_038-01")]//li')
        for li in li_list:
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Next-stage rows carry the article task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid is the document filename without its html-ish extension.
            temp["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["sub_db_id"] = '99324'
            # gfxwj links that climb a directory resolve from the xxgk
            # section; everything else resolves from list_rawid_alt.
            if callmodel.sql_model.list_rawid == "gfxwj" and "../" in url:
                url_before = "http://agri.hainan.gov.cn/hnsnyt/xxgk"
            else:
                url_before = "http://agri.hainan.gov.cn/hnsnyt/" + list_json["list_rawid_alt"]
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title
            # Some rows lack an <em> date; default to "" instead of crashing.
            pub_date = li.xpath('./em/text()').get() or ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_agrihainanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the agri.hainan.gov.cn table-style listing.

    The page count cannot be read from the HTML, so a fixed total of 8
    pages is assumed; page 0 fans out pages 1..7, and every page emits
    article rows (url, title, pub_date) for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    total_page = 8  # hard-coded: pagination info is not present in the HTML
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 0:
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    next_rows = DealInsertModel()
    next_rows.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=para_dicts["data"]["1_1"]['html'])
    for row in selector.xpath('//table[@id="tablist"]/tbody/tr'):
        link = row.xpath('./td[@class="name"]/a/@href').get()
        if link is None or 'htm' not in link:
            continue
        item = info_dicts.copy()
        # Next-stage rows carry the article task tag.
        item["task_tag"] = item.pop("task_tag_next")
        item["rawid"] = link.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        item["sub_db_id"] = '99324'
        date_text = row.xpath('./td[last()]/text()').get() or ""
        item["article_json"] = json.dumps({
            "url": link,
            "title": row.xpath('./td[@class="name"]/a/text()').get(),
            "pub_date": date_text.replace('发布时间：', '').strip(),
        }, ensure_ascii=False)
        next_rows.lists.append(item)
    result.next_dicts.insert.append(next_rows)

    return result


def policy_agrihainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for agri.hainan.gov.cn: no extra scheduling needed."""
    return DealModel()


def policy_agrihainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan Dept. of Agriculture (agri.hainan.gov.cn) articles.

    Parses the fetched article HTML, extracts document metadata (pub_no,
    index_no, subject, written_date, legal_status, organ) and the full text
    (with a fallback container xpath), builds the ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and schedules an ``other_dicts`` update
    carrying attachment info. The title is taken from the list page, as the
    article page does not expose one reliably.

    Raises:
        ValueError: when no fulltext node can be located in the page
            (was a bare ``raise Exception`` with no message).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = article_json['title'].strip()

    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()

    # Organ names beginning with 省 lack the province prefix on this site.
    if organ.startswith('省'):
        organ = '海南' + organ

    # Two page templates are in use; try the newer container first.
    fulltext_xpath = '//div[contains(@class, "xxgk-syxl-m-l-nr2")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "conm")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise ValueError(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99324'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "AGRIHAINAN"
    zt_provider = "agrihainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    # Always write other_dicts so reruns don't leave stale attachment info.
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省住房和城乡建设厅
def policy_zjthainanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Hainan Housing & Urban-Rural Dev. dept (zjt.hainan.gov.cn).

    Reads the total page count from the page's inline ``createPageHTML``
    call; on page 1 it fans out fetch tasks for the remaining list pages,
    then on every page it emits article rows (absolute url, title,
    pub_date) for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count embedded as createPageHTML('page_div', <total>, ...).
        page_match = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_match[0]) if page_match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class, "con-right")]/div[contains(@class, "list_div")]')
        for li in li_list:
            url = li.xpath('./div[contains(@class, "list-right_title")]/a/@href').get()
            title = li.xpath('./div[contains(@class, "list-right_title")]/a/text()').get()
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            # Next-stage rows carry the article task tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid is the document filename without its html-ish extension.
            temp["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["sub_db_id"] = '99325'
            article_json = dict()
            article_json["url"] = "http://zjt.hainan.gov.cn" + url
            article_json["title"] = title
            # Some rows lack a date cell; default to "" instead of crashing.
            pub_date = li.xpath('./table//tr/td[1]/text()').get() or ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjthainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjt.hainan.gov.cn: no extra scheduling needed."""
    return DealModel()


def policy_zjthainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan DOT (zjt.hainan.gov.cn) policy articles.

    Parses the fetched article HTML: extracts the title, document metadata
    (document number, index number, subject classification, written date,
    legal status, issuing organ) and the ``<ucapcontent>`` fulltext, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes
    any attachment info back onto the originating crawl row.

    Raises:
        Exception: if no fulltext node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the dedicated <ucaptitle> node, fall back to <h1>, and
    # finally to the title captured at list-crawl time.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata labels live in <strong> tags inside the zwgk_comr1 header
    # block; each field is located by its distinguishing characters so the
    # match survives whitespace variations in the label text.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"主") and contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"时") and contains(text(),"效")]/ancestor::span/b/b/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "zwgk_comr1")]//strong[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()

    # Bare "省..." organ names get the province prefix so they are
    # unambiguous downstream.
    if organ.startswith('省'):
        organ = '海南' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()

    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99325'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTHAINAN"
    zt_provider = "zjthainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links found inside the fulltext node are stored back into
    # other_dicts on the originating crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Hainan Provincial Health Commission (wst.hainan.gov.cn)
def policy_wsthainanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Hainan Health Commission column list pages.

    Reads the total page count from the embedded JS pager call
    ``createPageHTML(<count>, ...)``; on the first page (page_index == 0)
    queues the remaining list pages, and on every page extracts article
    links for the next stage.

    Bug fix: ``list_json`` is now loaded unconditionally.  It is required
    to rebuild absolute article URLs on every page, but was previously
    only loaded inside the ``page_index == 0`` branch, so every follow-up
    page raised NameError.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_info = re.findall(r"createPageHTML\((\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        # Needed both for queuing follow-up pages and for rebuilding
        # absolute article URLs in the loop below.
        list_json = json.loads(callmodel.sql_model.list_json)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            page_info_base = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_base}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # .copy() so each queued row keeps its own page values.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class, "flfg_03888")]//li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            # Skip rows without a usable .htm/.html/.shtml document link.
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid = the document's file name without its extension.
            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99326'
            # Resolve the (possibly relative) href against the list page URL.
            url_before = f"https://wst.hainan.gov.cn/swjw/{list_json['list_rawid_alt']}/{list_json['page_info']}.shtml"
            article_json["url"] = parse.urljoin(url_before, url).strip()
            article_json["title"] = title
            pub_date = li.xpath('./span/text()').get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsthainanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for a Health Commission table-style list page.

    Pagination cannot be read from this page's HTML (the pager is rendered
    client-side; the old regex approach is gone), so the total page count
    is pinned per list column (``list_rawid``).  On the first page
    (page_index == 0) the remaining pages are queued; on every page the
    article rows are extracted from the ``#tablist`` table and queued for
    the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Hard-coded totals per column id.
        # TODO(review): these go stale as the source site grows — verify
        # the counts periodically.
        if callmodel.sql_model.list_rawid == "1998":
            total_page = 78
        elif callmodel.sql_model.list_rawid == "1896":
            total_page = 7
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)

        if page_index == 0:
            # First page: queue one crawl row per remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # .copy() so each queued row keeps its own page values.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # One <tr> per document in the #tablist table.
        li_list = res.xpath('//table[@id="tablist"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./td[@class="name"]/a/@href').get()
            title = li.xpath('./td[@class="name"]/a/text()').get()
            # Skip rows without a usable .htm/.html/.shtml document link.
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = the document's file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99326'
            article_json["url"] = url
            article_json["title"] = title
            # Publish date sits in the last column; may be absent.
            pub_date = li.xpath('./td[last()]/text()').get()
            if not pub_date:
                pub_date = ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsthainanlist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Health Commission JSON list API.

    The payload under ``para_dicts["data"]["1_1"]["html"]`` is JSON, not
    HTML: ``object.num`` is the total record count (20 records per page)
    and ``object.xxgk`` holds the article entries.  On the first page
    (page_index == 1) the remaining pages are queued; on every page the
    entries are queued for the article stage.

    Cleanup: removed an unused ``Selector`` built from the JSON payload
    and an unused ``list_json`` load — neither affected the output.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        p_datas = json.loads(para_dicts["data"]["1_1"]['html'])
        data_1 = p_datas["object"]
        total_num = data_1["num"]
        # The API serves 20 records per page.
        total_page = math.ceil(int(total_num) / 20)
        page_index = int(callmodel.sql_model.page_index)

        if page_index == 1:
            # First page: queue one crawl row per remaining API page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": ""}, ensure_ascii=False)
                # .copy() so each queued row keeps its own page values.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in data_1["xxgk"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["DOCPUBURL"]
            title = li["DOCTITLE"]
            # Skip entries without a usable .htm/.html/.shtml document link.
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # rawid = the document's file name without its extension.
            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99326'
            article_json["url"] = url
            article_json["title"] = title
            pub_date = li["PUBDATE"]
            if not pub_date:
                pub_date = ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsthainanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Hainan Health Commission crawler.

    Article pages need no pre-ETL processing here, so an empty DealModel
    is handed back; parsing happens in the matching *_etl_callback.
    """
    return DealModel()


def policy_wsthainanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hainan Health Commission (wst.hainan.gov.cn) articles.

    Extracts document metadata (document number, index number, subject
    classification, written date, legal status, issuing organ) and the
    fulltext from the fetched article page, queues rows for the
    policy_latest / policy_fulltext_latest tables, and writes attachment
    info back onto the originating crawl row.

    Raises:
        Exception: if no fulltext node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # In-page title nodes proved unreliable for this site, so the title
    # captured at list-crawl time is used directly.
    title = article_json['title'].strip()

    # Metadata labels sit in <i> tags inside the xxgk-syxl-t header block;
    # each field is located by its distinguishing characters so the match
    # survives whitespace variations in the label text.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"主") and contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]//i[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()

    # Bare "省..." organ names get the province prefix so they are
    # unambiguous downstream.
    if organ.startswith('省'):
        organ = '海南' + organ

    # Fulltext container differs between page templates; try both.
    fulltext_xpath = '//div[contains(@class, "xxgk-syxl-m-l-nr2")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()

    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "zx-xxxqy-nr")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99326'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSTHAINAN"
    zt_provider = "wsthainangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links found inside the fulltext node are stored back into
    # other_dicts on the originating crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Haikou City, Hainan Province (www.haikou.gov.cn)
def policy_haikoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Haikou municipal government policy lists.

    Reads the total page count from the JS pager (``createPageHTML``) —
    except for the "szfwj" column, which has no pager and uses a fixed
    count of 3 — queues the remaining pages on page 0, and extracts
    article links for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if callmodel.sql_model.list_rawid != "szfwj":
            # Raw string: the old non-raw pattern relied on the invalid
            # escape sequence "\(" (a SyntaxWarning on modern CPython).
            page_info = re.findall(r"createPageHTML\((\d+),", para_dicts["data"]["1_1"]['html'])
        else:
            # The szfwj column has no JS pager; its page count is fixed.
            page_info = ["3"]
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: queue one crawl row per remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info_base = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_base}_{page}", "last_name": list_json["last_name"],
                       "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # .copy() so each queued row keeps its own page values.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts: the regular column list vs the szfwj gazette.
        pub_date_path = ""
        if callmodel.sql_model.list_rawid != "szfwj":
            li_list = res.xpath('//div[contains(@class, "list-c")]/ul/li')
            url_path = "./p/a/@href"
            title_path = "./p/a/@title"
            pub_date_path = './p/span[@class="reltime"]/text()'
        else:
            li_list = res.xpath('//div[contains(@class, "gzk-list-cl")]/ul')
            url_path = './li[@class="c-bt"]/p/a/@href'
            title_path = './li[@class="c-bt"]/p/a/@title'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            # Skip rows without a usable .htm/.html/.shtml document link.
            if url is None:
                continue
            elif 'htm' not in url:
                continue
            list_rawid_alt = list_json["list_rawid_alt"]
            if "../" in url:
                # Relative link climbs one level: drop the last path
                # segment of the column path before joining.
                last_segment = "/" + list_rawid_alt.split("/")[-1]
                url_before = "http://www.haikou.gov.cn/" + list_rawid_alt.replace(last_segment, "")
            else:
                url_before = "http://www.haikou.gov.cn/" + list_rawid_alt
            # rawid = the document's file name without its extension.
            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99327'
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title
            if pub_date_path != "":
                pub_date = li.xpath(pub_date_path).get()
            else:
                pub_date = ""
            # Guard: .get() returns None when the date node is missing,
            # which previously crashed on .replace().
            if not pub_date:
                pub_date = ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_haikouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Haikou municipal crawler.

    Article pages need no pre-ETL processing here, so an empty DealModel
    is handed back; parsing happens in the matching *_etl_callback.
    """
    return DealModel()


def policy_haikouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Haikou municipal (www.haikou.gov.cn) policy articles.

    Extracts the title, document metadata (document number, dates, legal
    status, keywords, issuing organ) and the fulltext from the fetched
    article page — the site uses several page templates, so multiple
    xpath fallbacks are tried — then queues rows for the policy_latest /
    policy_fulltext_latest tables and writes attachment info back onto
    the originating crawl row.

    Raises:
        Exception: if no fulltext node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    title = ''.join(res.xpath('//h1//text()').extract()).strip()
    # A newline inside <h1> means it carries extra markup; prefer the
    # ArticleTitle meta tag, then the title captured at list-crawl time.
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def solve_callback(text):
        """Strip a leading "label：" prefix from a metadata value; map None to ""."""
        if text is None:
            return ""
        return re.sub(r".*：(.*)", r"\1", text)

    # Metadata appears either in the maincon-info div layout or in the
    # #headContainer table layout; both are queried via xpath unions.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"文") and contains(text(),"号")]/text()|//table[@id="headContainer"]//td[contains(string(),"文") and contains(string(),"号")]/span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/text()|//table[@id="headContainer"]//td[contains(string(),"文") and contains(string(),"发") and contains(string(),"日")]/span/text()').extract()).strip()

    pub_date = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"发") and contains(text(),"布") and contains(text(),"日")]/text()').extract()).strip()
    pub_date = solve_callback(pub_date)
    # Fall back to the date captured at list-crawl time.
    if not pub_date:
        pub_date = article_json["pub_date"]
    pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]
    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"时") and contains(text(),"效")]/text()|//table[@id="headContainer"]//td[contains(string(),"效") and contains(string(),"时")]/span/text()').extract()).strip()

    keyword = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"主") and contains(text(),"词")]/text()|//table[@id="headContainer"]//td[contains(string(),"主") and contains(string(),"词")]/span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "maincon-info")]//div[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/text()|//table[@id="headContainer"]//td[contains(string(),"发") and contains(string(),"布") and contains(string(),"构")]/span/text()').extract()).strip()

    # Bare "市..." organ names get the city prefix so they are
    # unambiguous downstream.
    if organ.startswith('市'):
        organ = '海口' + organ

    # Fulltext container differs between page templates; try each in turn.
    fulltext_xpath = '//div[contains(@id, "zl_Articel")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@class, "gzk-con-cc")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[contains(@id, "ContentRegion")]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="zw"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99327'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HAIKOU"
    zt_provider = "haikougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = solve_callback(pub_no)
    data['organ'] = solve_callback(organ)
    written_date_before = solve_callback(written_date)
    data['written_date'] = clean_pubdate(written_date_before)
    data['keyword'] = clean_text(keyword)
    legal_status_before = solve_callback(legal_status)
    data['legal_status'] = clean_pubdate(legal_status_before)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment links found inside the fulltext node are stored back into
    # other_dicts on the originating crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省三亚市
def policy_sanyalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sanya (Hainan) gov policy site.

    Reads the total page count from the createPageHTML() JS call embedded in
    the list HTML, fans out the remaining list pages when on page 1, and
    emits one article-level task per list item.

    Args:
        callmodel: platform callback context carrying the fetched page
            (para_dicts), the source row (sql_model) and task routing info.

    Returns:
        DealModel with page fan-out inserts (befor_dicts) and article-task
        inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in JS: createPageHTML('page_div', N, ...)
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts: a "line_li" <ul> or an "isNew" <ul>.
        li_list = res.xpath('//ul[contains(@class, "line_li")]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        if not li_list:
            li_list = res.xpath('//ul[contains(@id, "isNew")]/li')
            url_path = './div/a/@href'
            title_path = './div/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if url is None or 'htm' not in url:
                # Skip rows without a usable detail-page link.
                continue

            url_before = "http://www.sanya.gov.cn"
            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99328'
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title
            pub_date = li.xpath("./div[@rel='发布日期']/text()").get()
            if not pub_date:
                pub_date = li.xpath('./em/text()').get()
            # Guard: both date xpaths can miss — the original then raised
            # AttributeError on None.replace(); store an empty date instead.
            pub_date = pub_date or ''
            article_json["pub_date"] = pub_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sanyaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Sanya: performs no processing here, just
    returns an empty DealModel."""
    return DealModel()


def policy_sanyaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sanya gov policy article pages.

    Extracts title/metadata/full text from the downloaded article HTML and
    builds rows for the policy_latest and policy_fulltext_latest tables,
    plus an attachment-info ("other_dicts") update for the source row.

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="pages_box"]//ucaptitle//text()').extract()).strip()
    # BUGFIX: the original tested `if title:` here, which discarded a title
    # that had just been extracted successfully; fall back to the meta tag
    # only when the first xpath produced nothing (matches sibling callbacks).
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located via the CJK characters inside their labels.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"文") and contains(text(),"号")]/ancestor::li/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"索") and contains(text(),"引")]/ancestor::li/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::li/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"主") and contains(text(),"类")]/ancestor::li/text()').extract()).strip()

    keyword = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"主") and contains(text(),"词")]/ancestor::li/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "date_list")]//span[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::li/text()').extract()).strip()

    # The site abbreviates the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '三亚' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99328'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SANYA"
    zt_provider = "sanyagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['keyword'] = keyword

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment/file references found inside the full-text node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省三沙市
def policy_sanshalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sansha (Hainan) gov policy site.

    Reads the total page count from the createPageHTML() JS call, fans out
    the remaining list pages when on page 1, and emits one article-level
    task per list row.

    Args:
        callmodel: platform callback context carrying the fetched page
            (para_dicts), the source row (sql_model) and task routing info.

    Returns:
        DealModel with page fan-out inserts (befor_dicts) and article-task
        inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in JS: createPageHTML('page_div', N, ...)
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class, "con-right")]/div[contains(@class, "list_div")]')
        url_path = './div[contains(@class, "list-right_title")]/a/@href'
        title_path = './div[contains(@class, "list-right_title")]/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if url is None or 'htm' not in url:
                # Skip rows without a usable detail-page link.
                continue

            url_before = "http://www.sansha.gov.cn"
            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99329'
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title
            pub_date = li.xpath("./table//td[1]/text()").get()
            if not pub_date:
                pub_date = li.xpath('./em/text()').get()
            # Guard: both date xpaths can miss — the original then raised
            # AttributeError on None.replace(); store an empty date instead.
            pub_date = pub_date or ''
            article_json["pub_date"] = pub_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sanshaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Sansha: performs no processing here, just
    returns an empty DealModel."""
    return DealModel()


def policy_sanshaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sansha gov policy article pages.

    Pulls the title and full text out of the article HTML and assembles rows
    for policy_latest / policy_fulltext_latest, plus an attachment-info
    ("other_dicts") update for the source row.
    """
    result = EtlDealModel()

    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=callmodel.para_dicts['data']['1_1']['html'])

    # Prefer the page's own <ucaptitle>, otherwise reuse the list-page title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    sub_db_id = '99329'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "SANSHA", "sanshagovpolicy")
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    result.save_data = [
        {'table': 'policy_latest', 'data': data},
        {'table': 'policy_fulltext_latest', 'data': full_text_data},
    ]

    # Record any attachment references found inside the full-text node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({
        "rawid": callmodel.sql_model.rawid,
        "task_tag": callmodel.sql_model.task_tag,
        "task_name": callmodel.sql_model.task_name,
    })
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 海南省儋州市
def policy_danzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Danzhou (Hainan) gov policy HTML lists.

    Reads the total page count from the createPageHTML() JS call, fans out
    the remaining list pages on the first page, and emits one article-level
    task per list item. The article URL base depends on which list section
    ("list_rawid") the item came from.

    Returns:
        DealModel with page fan-out inserts (befor_dicts) and article-task
        inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_info = re.findall(r"createPageHTML\((\d+),", para_dicts["data"]["1_1"]['html'])
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        # NOTE(review): unlike the 1-based sibling callbacks, this site's
        # paging starts at index 0 and the fan-out stops at total_page - 1;
        # preserved as-is — confirm against the site's URL scheme.
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class, "list")]/li')
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/text()').get()
            if url is None or 'htm' not in url:
                # Skip rows without a usable detail-page link.
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99330'
            list_rawid = callmodel.sql_model.list_rawid
            # The base URL depends on the list section and the link form.
            if list_rawid == "zxjd":
                if "./" in url:
                    url_before = "https://www.danzhou.gov.cn/danzhou/jdhy/zcjd/zxjd"
                else:
                    url_before = ""
            else:
                if "../" in url:
                    url_before = "https://www.danzhou.gov.cn/danzhou"
                else:
                    url_before = "https://www.danzhou.gov.cn/danzhou/{}".format(list_json["list_rawid_alt"])
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").replace("//", "/").replace(
                "//", "/").replace("http:/w", "http://w")
            article_json["title"] = title
            # Guard: the date <span> can be absent — the original then raised
            # AttributeError on None.replace(); store an empty date instead.
            pub_date = li.xpath('./span/text()').get() or ''
            article_json["pub_date"] = pub_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_danzhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Danzhou's JSON list API (20 items per page).

    Computes the page count from the "num" field, fans out the remaining
    pages on page 1, and emits one article-level task per "xxgk" entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    object_json = json.loads(para_dicts["data"]["1_1"]['html'])["object"]
    total_page = math.ceil(int(object_json["num"]) / 20)
    page_index = int(callmodel.sql_model.page_index)

    if page_index == 1:
        # Only the first page schedules the remaining pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for item in object_json["xxgk"]:
        url = item["DOCPUBURL"]
        if url is None or 'htm' not in url:
            continue
        task = info_dicts.copy()
        task["task_tag"] = task.pop("task_tag_next")
        task["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task["sub_db_id"] = '99330'
        article_json = {
            "url": url,
            "title": item["DOCTITLE"],
            "pub_date": clean_pubdate(item["PUBDATE"] or ""),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_danzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Danzhou: performs no processing here, just
    returns an empty DealModel."""
    return DealModel()


def policy_danzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Danzhou gov policy article pages.

    Parses metadata from the "xxgk-syxl-t" header block and the full text
    from one of two known content containers, then builds rows for
    policy_latest / policy_fulltext_latest plus an attachment-info
    ("other_dicts") update for the source row.

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = article_json['title'].strip()
    # List-page titles may be truncated with "..."; prefer the page's own title.
    if '...' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//li[contains(@class,"title")]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located via the CJK characters inside their labels.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"文") and contains(text(),"号")]/ancestor::span/text()').extract()).strip()
    # "无" means "none" on this site — normalize to empty.
    if pub_no == '无':
        pub_no = ''
    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"索") and contains(text(),"引")]/ancestor::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"分") and contains(text(),"类")]/ancestor::span/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"成") and contains(text(),"文") and contains(text(),"日")]/ancestor::span/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"时") and contains(text(),"效")]/ancestor::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgk-syxl-t")]/span/i[contains(text(),"发") and contains(text(),"机") and contains(text(),"关")]/ancestor::span/text()').extract()).strip()

    # The site abbreviates the issuing organ as "市..."; prefix the city name.
    if organ.startswith('市'):
        organ = '儋州' + organ

    # Primary content container; fall back to the "neirongText" layout.
    fulltext_xpath = '//div[contains(@class, "xxgk-syxl-m-l-nr2")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="neirongText"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99330'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DANZHOU"
    zt_provider = "danzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment/file references found inside the full-text node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省发展和改革委员会
def policy_drcjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangxi DRC policy site (XML-style feed).

    Reads the total page count from the <totalpage> element, fans out the
    remaining pages on page 1, and emits one article-level task per
    <record> entry.

    Returns:
        DealModel with page fan-out inserts (befor_dicts) and article-task
        inserts (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Build the selector once; the original constructed it twice.
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info = res.xpath('//totalpage/text()').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # NOTE: page_info is reused unchanged (the page number rides
                # in page_index), unlike sibling sites that append "_{page}".
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if url is None or 'htm' not in url:
                # Skip entries without a usable detail-page link.
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99331'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            # Guard: title node can be absent — the original then raised
            # AttributeError on None.strip().
            article_json["title"] = (title or '').strip()
            pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            # Guard: both date xpaths can miss — store an empty date instead
            # of crashing on None.replace().
            pub_date_before = pub_date_before or ''
            article_json["pub_date"] = pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_drcjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Jiangxi DRC: performs no processing here,
    just returns an empty DealModel."""
    return DealModel()


def policy_drcjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangxi DRC policy article pages.

    Extracts title/metadata from the "xxgkTitle" table and the full text
    from the Zoom/zoom container, then builds policy_latest /
    policy_fulltext_latest rows and an attachment-info ("other_dicts")
    update for the source row.

    Raises:
        Exception: when no full-text node can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title candidates, from most to least specific; fall back to the
    # list-page title last.
    title = ''.join(res.xpath('//p[@class="sp_title con-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells are located via the CJK characters inside their labels.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::td/text()').extract()).strip()

    # The site abbreviates the issuing organ as "省..."; prefix the province.
    if organ.startswith('省'):
        organ = '江西' + organ

    # Primary container is id="Zoom"; fall back to lowercase id="zoom".
    # (The original re-queried the same xpath a second time even when the
    # first extraction succeeded; that redundant query is removed — the
    # resulting value is identical.)
    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99331'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # NOTE(review): "DRCJIAGNGXI" looks like a typo for "DRCJIANGXI", but it
    # is a stored product code — do not change without migrating existing data.
    product = "DRCJIAGNGXI"
    zt_provider = "drcjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment/file references found inside the full-text node.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省工业和信息化厅
def policy_jxciitlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jxciit.gov.cn (Jiangxi Dept. of Industry and IT).

    Parses one paginated list page:
      * on page 1, schedules crawl tasks for pages 2..N (befor_dicts.insert);
      * for every table row, emits one article task (next_dicts.insert) whose
        article_json carries url / title / pub_date.

    The page count is read from the "尾页" (last page) link, whose href looks
    like "Index_<N>.aspx".
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # NOTE(review): if the "尾页" link is missing, page_info_before is None
        # and re.sub raises TypeError — the task fails and can be retried.
        page_info_before = res.xpath(
            '//div[@class="class_page1"]//a[contains(text(), "尾页")]/@href').extract_first()
        page_info = re.sub("Index_(.*?)\.aspx", "\\1", page_info_before)
        total_page = int(page_info) if page_info else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is responsible for fanning out tasks for the other pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[contains(@class, "c_main_lanmu")]//tr')
        url_path = './td/a/@href'
        title_path = './td/a/text()'
        for li in li_list:
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if url is None:
                continue
            elif 'aspx' not in url:
                continue

            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            # FIX: host was "http://wwww.jxciit.gov.cn" (four w's) — almost
            # certainly a typo that produced non-resolving article URLs.
            # Verify article pages download correctly after this change.
            url_before = "http://www.jxciit.gov.cn"
            # rawid = filename without its extension.
            rawid = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "").replace(".aspx", "")
            article_json = dict()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99332'
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date = li.xpath("./td[last()]/text()").get()
            article_json["pub_date"] = pub_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result

def policy_jxciitlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for col68660 of gxt.jiangxi.gov.cn (XML <record> feed).

    Page 1 fans out the remaining list pages in 3-page strides (25 records
    per page); every <record> row becomes one article task with
    url / title / pub_date stored in article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    html = para_dicts["data"]["1_1"]['html']
    found = re.findall('<totalrecord>(\d+)</totalrecord>', html)
    max_count = int(found[0]) if found else 1
    total_page = math.ceil(max_count / 25)

    if int(callmodel.sql_model.page_index) == 1:
        # Page 1 schedules the rest of the listing, 3 list pages per task.
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(1, total_page + 1, 3):
            first = (page - 1) * 25 + 1
            last = min((page + 2) * 25, max_count)
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(
                {"start": first, "end": last, "page_info": list_json["page_info"]},
                ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    base_url = f'http://gxt.jiangxi.gov.cn/col/col68660/index.html'
    for record in Selector(text=html).xpath('//record'):
        href = record.xpath('li/a/@href').extract_first()
        url = parse.urljoin(base_url, href)
        if 'htm' not in url:
            continue
        row = info_dicts.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename stem of the article URL.
        row["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        row["sub_db_id"] = '99332'
        article_json = {
            "url": url,
            "title": record.xpath('li/a/text()').extract_first().strip(),
            "pub_date": record.xpath('li/b/text()').extract_first().strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        articles.lists.append(row)
    result.next_dicts.insert.append(articles)

    return result


def policy_jxciitarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op: the download itself is enough; parsing happens in ETL."""
    return DealModel()


def policy_jxciitarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for jxciit.gov.cn articles.

    Extracts title and fulltext from the downloaded HTML, builds the
    policy_latest / policy_fulltext_latest rows, and writes attachment info
    back onto the task row (other_dicts).

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page-specific <font> layout first, then the generic title cell,
    # finally fall back to the title captured at list time.
    title = ''.join(res.xpath('//td[contains(text(),"发布时间：")]/preceding::tr[1]/td/font/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//td[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="fontzoom"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context (was a bare, message-less Exception) so the
        # task is retried/flagged instead of saving an empty body.
        raise Exception(f"fulltext not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99332'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXCIIT"
    zt_provider = "jxciitgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/trace output

    data['title'] = title
    data['provider_url'] = provider_url
    # NOTE(review): pub_date was already cleaned above; the second clean is
    # redundant but kept, assuming clean_pubdate is idempotent — confirm.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the fulltext region, pushed back onto the
    # source row as other_dicts for the downloader.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省科学技术厅 (Jiangxi Provincial Department of Science and Technology)
def policy_kjtjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for col27029 of kjt.jiangxi.gov.cn (XML <record> feed).

    On page 1 the remaining list pages are scheduled in 3-page strides
    (25 records per page); each <record> row yields one article task with
    url / title / pub_date in article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    html = para_dicts["data"]["1_1"]['html']
    found = re.findall('<totalrecord>(\d+)</totalrecord>', html)
    max_count = int(found[0]) if found else 1
    total_page = math.ceil(max_count / 25)

    if int(callmodel.sql_model.page_index) == 1:
        # Page 1 schedules the rest of the listing, 3 list pages per task.
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(1, total_page + 1, 3):
            first = (page - 1) * 25 + 1
            last = (page + 2) * 25
            if last >= max_count:
                last = max_count
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(
                {"start": first, "end": last, "page_info": list_json["page_info"]},
                ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    base_url = f'http://kjt.jiangxi.gov.cn/col/col27029/index.html'
    for record in Selector(text=html).xpath('//record'):
        href = record.xpath('a/@href|td[1]/a/@href').extract_first()
        url = parse.urljoin(base_url, href)
        if 'htm' not in url:
            continue
        row = info_dicts.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename stem of the article URL.
        row["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        row["sub_db_id"] = '99333'
        article_json = {
            "url": url,
            "title": record.xpath('a/text()|td[1]/a/text()').extract_first().strip(),
            "pub_date": record.xpath('span/text()|b/text()|td[4]/text()').extract_first().strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        articles.lists.append(row)
    result.next_dicts.insert.append(articles)

    return result

def policy_kjtjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op: the download itself is enough; parsing happens in ETL."""
    return DealModel()


def policy_kjtjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for kjt.jiangxi.gov.cn articles.

    Extracts title, disclosure metadata (doc number, index number, written
    date, legal status, issuing organ) and fulltext from the downloaded HTML,
    builds the policy_latest / policy_fulltext_latest rows, and writes
    attachment info back to the task row (other_dicts).

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title chain: styled <p> first, then <h1>; an overlong <h1> (>= 50 chars)
    # usually means extra text was grabbed, so prefer the ArticleTitle meta.
    # The second meta lookup is reached when both <p> and <h1> were empty.
    title = ''.join(res.xpath('//p[@class="sp_title con-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if len(title) >= 50:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Disclosure metadata lives in the "gsgg" table; each label is a <b>.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "gsgg")]//td//b[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "gsgg")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "gsgg")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "gsgg")]//td//b[contains(string(),"有") and contains(string(),"效")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "gsgg")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::td/text()').extract()).strip()

    # Site abbreviates the province: "省科技厅" -> "江西省科技厅".
    if organ.startswith('省'):
        organ = '江西' + organ

    # Fulltext container varies by template; try "new_m", then fall back to
    # #zoom. (Previously the first extract was discarded and re-run; the
    # fallback extraction now happens only when the first selector missed.)
    fulltext_xpath = '//div[@class="new_m"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context (was a bare, message-less Exception).
        raise Exception(f"fulltext not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99333'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTJIANGXI"
    zt_provider = "kjtjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/trace output

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # NOTE(review): legal_status is run through clean_pubdate (a date cleaner),
    # matching the original behavior — looks odd; confirm this is intended.
    data['legal_status'] = clean_pubdate(legal_status)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the fulltext region, pushed back onto the
    # source row as other_dicts for the downloader.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省教育厅 (Jiangxi Provincial Department of Education)
def policy_jytjiangxilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jyt (Jiangxi education dept.) XML listings.

    <totalpage> gives the page count; page 1 schedules pages 2..N, and every
    <record> row becomes one article task carrying url / title / pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    res = Selector(text=para_dicts["data"]["1_1"]['html'])
    page_info = res.xpath('//totalpage/text()').extract_first()
    total_page = int(page_info) if page_info else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Page 1 fans out tasks for all remaining list pages.
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)
        page_info = list_json["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    for record in res.xpath('//recordset/record'):
        url = record.xpath('.//a/@href').get()
        if url is None or 'htm' not in url:
            continue
        title = record.xpath('.//a/text()').get()
        raw_date = record.xpath("./span/text()").get()
        if not raw_date:
            raw_date = record.xpath(".//b/text()").get()
        row = info_dicts.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename without its extension.
        row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        row["sub_db_id"] = '99334'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": raw_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        articles.lists.append(row)
    result.next_dicts.insert.append(articles)

    return result


def policy_jytjiangxilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jyt XML listings (variant carrying list_rawid_alt).

    Identical flow to policy_jytjiangxilist1_callback, except the fanned-out
    list_json also propagates the list_rawid_alt value from the seed task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    res = Selector(text=para_dicts["data"]["1_1"]['html'])
    page_info = res.xpath('//totalpage/text()').extract_first()
    total_page = int(page_info) if page_info else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Page 1 fans out tasks for all remaining list pages.
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)
        page_info = list_json["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(
                {"page_info": f"{page_info}", "list_rawid_alt": list_json["list_rawid_alt"]},
                ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    for record in res.xpath('//recordset/record'):
        url = record.xpath('.//a/@href').get()
        if url is None or 'htm' not in url:
            continue
        title = record.xpath('.//a/text()').get()
        raw_date = record.xpath("./span/text()").get()
        if not raw_date:
            raw_date = record.xpath(".//b/text()").get()
        row = info_dicts.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename without its extension.
        row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        row["sub_db_id"] = '99334'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": raw_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        articles.lists.append(row)
    result.next_dicts.insert.append(articles)

    return result


def policy_jytjiangxilist3_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the jyt HTML disclosure listing (zfxxgk_zdgkc).

    The page count is parsed out of the "尾页" (last page) link's onclick-style
    href; page 1 schedules pages 2..N, and each <li> becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    res = Selector(text=para_dicts["data"]["1_1"]['html'])
    # NOTE: if the link is missing, page_info_before is None and re.sub raises.
    page_info_before = res.xpath('//tr//a[contains(text(), "尾") and contains(text(), "页")]/@href').extract_first()
    page_info = re.sub(".*search.jsp',(.*?)\);", "\\1", page_info_before)
    total_page = int(page_info) if page_info else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Page 1 fans out tasks for all remaining list pages.
        fanout = DealInsertModel()
        fanout.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)
        page_info = list_json["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            fanout.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(fanout)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    for item in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
        url = item.xpath('./a/@href').get()
        if url is None or 'htm' not in url:
            continue
        title = item.xpath('./a/@title').get()
        raw_date = item.xpath("./span/text()").get()
        if not raw_date:
            raw_date = item.xpath(".//b/text()").get()
        row = info_dicts.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename with the query suffix and extension stripped.
        row["rawid"] = url.split('/')[-1].replace(".html?xxgkhide=1", "").replace(".shtml", "").replace(
            ".html", "").replace(".htm", "")
        row["sub_db_id"] = '99334'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": raw_date.replace('发布时间：', '').replace('[', '').replace(']', '').strip(),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        articles.lists.append(row)
    result.next_dicts.insert.append(articles)

    return result


def policy_jytjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op: the download itself is enough; parsing happens in ETL."""
    return DealModel()


def policy_jytjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for jyt.jiangxi.gov.cn articles.

    Extracts title, disclosure metadata (doc number, index number, written
    date, issuing organ) and fulltext from the downloaded HTML, builds the
    policy_latest / policy_fulltext_latest rows, and writes attachment info
    back to the task row (other_dicts).

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    # Title: <h1> first, then the ArticleTitle meta, finally the list-time title.
    title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Disclosure metadata lives in the "xxgkTitle" table; each label is a <b>.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/ancestor::td/text()').extract()).strip()

    # Site abbreviates the province: "省教育厅" -> "江西省教育厅".
    if organ.startswith('省'):
        organ = '江西' + organ

    # Fulltext container varies by template; try "new_m", then fall back to
    # #zoom. (Previously the first extract was discarded and re-run; the
    # fallback extraction now happens only when the first selector missed.)
    fulltext_xpath = '//div[@class="new_m"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context (was a bare, message-less Exception).
        raise Exception(f"fulltext not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99334'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTJIANGXI"
    zt_provider = "jytjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/trace output

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the fulltext region, pushed back onto the
    # source row as other_dicts for the downloader.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省民政厅 (Jiangxi Provincial Department of Civil Affairs)
def policy_mztjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info = res.xpath('//totalpage/text()').extract_first()
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/@title'
        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            if url is None:
                continue
            elif 'htm' not in url:
                continue

            # url_before = "http://www.sanya.gov.cn"
            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99335'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath("./a/span[@class='tt fr']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for the Jiangxi Civil Affairs Department site.

    No parsing happens at this stage (the ETL callback does the work),
    so an empty deal model is returned.
    """
    return DealModel()


def policy_mztjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of the Jiangxi Civil Affairs Department.

    Extracts title and fulltext from the html, builds the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and records
    attachment info found in the fulltext container into ``other_dicts``
    of the originating task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//p[@class="nm"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Try the primary fulltext container first, then the fallback; fail
    # loudly if neither matches so the task can be inspected/retried.
    fulltext_xpath = '//div[@class="txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99335'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTJIANGXI"
    zt_provider = "mztjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links are searched only inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')

    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jiangxi Provincial Department of Finance (江西省财政厅)
def policy_jxfjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page (XML ``<recordset>``) of the Jiangxi
    Department of Finance policy site.

    On page 1 it fans out tasks for every remaining list page; for every
    ``<record>`` it emits one next-stage (article) task carrying the
    article url/title/pub_date in ``article_json``.

    :param callmodel: platform callback model holding the fetched html,
        the originating SQL row and the redis task configuration.
    :return: DealModel with ``befor_dicts`` (extra list pages, page 1
        only) and ``next_dicts`` (article tasks) populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total number of list pages comes from <totalpage>; default to 1
        # when the element is absent.
        page_info = res.xpath('//totalpage/text()').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy() because sql_dict is mutated on every iteration
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//recordset/record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('.//a/@href').get()
            title = li.xpath('.//a/@title').get()
            if title is None:
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            # Only keep links that look like article pages (*.htm/*.html).
            if url is None or 'htm' not in url:
                continue

            # rawid = file name of the url without its html-ish extension.
            rawid = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99336'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            # Both title xpaths may miss; fall back to "" instead of
            # crashing on None.strip().
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath("./span[@class='bt-data-time']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxfjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for the Jiangxi Department of Finance site.

    No parsing happens at this stage (the ETL callback does the work),
    so an empty deal model is returned.
    """
    return DealModel()


def policy_jxfjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of the Jiangxi Department of Finance.

    Extracts title and fulltext from the html, builds the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and records
    attachment info found in the fulltext container into ``other_dicts``
    of the originating task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="zw_title"]/p/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Try the primary fulltext container first, then the fallback; fail
    # loudly if neither matches so the task can be inspected/retried.
    fulltext_xpath = '//div[@class="txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99336'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXFJIANGXI"
    zt_provider = "jxfjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links are searched only inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')

    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jiangxi Provincial Department of Human Resources and Social Security (江西省人力资源和社会保障厅)
def policy_rstjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page (XML ``<recordset>``) of the Jiangxi HR and
    Social Security Department policy site.

    On page 1 it fans out tasks for every remaining list page (keeping
    the ``sourceContentType`` this site needs in ``list_json``); for
    every ``<record>`` it emits one next-stage (article) task carrying
    the article url/title/pub_date in ``article_json``.

    :param callmodel: platform callback model holding the fetched html,
        the originating SQL row and the redis task configuration.
    :return: DealModel with ``befor_dicts`` (extra list pages, page 1
        only) and ``next_dicts`` (article tasks) populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total number of list pages comes from <totalpage>; default to 1
        # when the element is absent.
        page_info = res.xpath('//totalpage/text()').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # This site additionally requires sourceContentType to
                # request the next pages.
                dic = {"page_info": f"{page_info}", "sourceContentType": list_json["sourceContentType"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy() because sql_dict is mutated on every iteration
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//recordset/record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('.//a/@href').get()
            title = li.xpath('.//a/@title').get()
            if title is None:
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            # Only keep links that look like article pages (*.htm/*.html).
            if url is None or 'htm' not in url:
                continue

            # rawid = file name of the url without its html-ish extension.
            rawid = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99337'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            # Both title xpaths may miss; fall back to "" instead of
            # crashing on None.strip().
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath("./em[@class='date']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for the Jiangxi HR and Social Security site.

    No parsing happens at this stage (the ETL callback does the work),
    so an empty deal model is returned.
    """
    return DealModel()


def policy_rstjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of the Jiangxi HR and Social Security site.

    Extracts title, document metadata (document number, index number,
    subject classification, written date, issuing organ) from the
    ``xxgkTitle`` info table, and the fulltext; builds the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and records
    attachment info found in the fulltext container into ``other_dicts``
    of the originating task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="newsinfo"]/h1/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Metadata cells in the xxgkTitle table are located by matching the
    # characters of their <b> labels (e.g. 文号 = document number),
    # character by character, because labels may contain spacing chars.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::td/text()').extract()).strip()
    # Organs published as "省..." are provincial; qualify with 江西.
    if organ.startswith('省'):
        organ = '江西' + organ

    # Try the primary fulltext container first, then the fallback; fail
    # loudly if neither matches so the task can be inspected/retried.
    fulltext_xpath = '//div[@class="main-wrapper"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99337'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTJIANGXI"
    zt_provider = "rstjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['written_date'] = clean_pubdate(written_date)
    data['organ'] = organ

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links are searched only inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')

    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jiangxi Provincial Department of Agriculture and Rural Affairs (江西省农业农村厅)
def policy_nyncjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page (XML ``<recordset>``) of the Jiangxi
    Agriculture and Rural Affairs Department policy site.

    On page 1 it fans out tasks for every remaining list page; for every
    ``<record>`` it emits one next-stage (article) task carrying the
    article url/title in ``article_json``. Unlike the sibling sites, the
    publication date is not available on the list page and is extracted
    by the ETL callback instead.

    :param callmodel: platform callback model holding the fetched html,
        the originating SQL row and the redis task configuration.
    :return: DealModel with ``befor_dicts`` (extra list pages, page 1
        only) and ``next_dicts`` (article tasks) populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total number of list pages comes from <totalpage>; default to 1
        # when the element is absent.
        page_info = res.xpath('//totalpage/text()').extract_first()
        total_page = int(page_info) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy() because sql_dict is mutated on every iteration
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in res.xpath('//recordset/record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('.//a/@href').get()
            title = li.xpath('.//a/@title').get()
            if title is None:
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            # Only keep links that look like article pages (*.htm/*.html).
            if url is None or 'htm' not in url:
                continue

            # rawid = file name of the url without its html-ish extension.
            rawid = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99788'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            # Both title xpaths may miss; fall back to "" instead of
            # crashing on None.strip().
            article_json["title"] = (title or "").strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for the Jiangxi Agriculture and Rural Affairs site.

    No parsing happens at this stage (the ETL callback does the work),
    so an empty deal model is returned.
    """
    return DealModel()


def policy_nyncjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL an article page of the Jiangxi Agriculture and Rural Affairs
    Department.

    Extracts title, publication date and document metadata (document
    number, index number, keyword, subject classification, issuing
    organ) from the ``metalist`` block, plus the fulltext; builds the
    ``policy_latest`` and ``policy_fulltext_latest`` rows, and records
    attachment info found in the fulltext container into ``other_dicts``
    of the originating task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="newsinfo"]/h1/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Metadata rows in the metalist block are located by matching the
    # characters of their label <span> (e.g. 文号 = document number) and
    # reading the sibling value <span>.
    pub_no = ''.join(res.xpath(
        '//ul[contains(@class, "metalist")]//li/span[contains(string(),"文") and contains(string(),"号")]/following-sibling::span/text()').extract()).strip()

    # pub_date is not captured on the list page for this site; read it
    # from the article itself, with a legacy-layout fallback.
    pub_date = ''.join(res.xpath(
        '//span[@name="publishDate"]/text()').extract()).strip()
    if not pub_date:
        # NOTE(review): extract_first() may return None here — assumes
        # clean_pubdate tolerates None; confirm against its definition.
        pub_date = res.xpath('//div[contains(@class, "sp_time")]/font[contains(text(), "发布时间")]/text()').extract_first()
    pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]
    index_no = ''.join(res.xpath(
        '//ul[contains(@class, "metalist")]//li/span[contains(string(),"索") and contains(string(),"引")]/following-sibling::span/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//ul[contains(@class, "metalist")]//li/span[contains(string(),"主") and contains(string(),"词")]/following-sibling::span/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//ul[contains(@class, "metalist")]//li/span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::span/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//ul[contains(@class, "metalist")]//li/span[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::span/text()').extract()).strip()

    # Organs published as "省..." are provincial; qualify with 江西.
    if organ.startswith('省'):
        organ = '江西' + organ

    # Try the primary fulltext container first, then the fallback; fail
    # loudly if neither matches so the task can be inspected/retried.
    fulltext_xpath = '//div[@id="div_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for rawid={callmodel.sql_model.rawid}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99788'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCJIANGXI"
    zt_provider = "nyncjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['keyword'] = keyword

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links are searched only inside the fulltext container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')

    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省住房和城乡建设厅 (Jiangxi Provincial Department of Housing and Urban-Rural Development)
def policy_jxjstlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangxi housing-department policy site.

    Reads the fetched list page (an XML-like ``<recordset>`` document)
    from ``para_dicts["data"]["1_1"]`` and:

    * on page 1, schedules the remaining list pages (2..totalpage) as new
      list-stage tasks (``befor_dicts``), using ``<totalpage>`` from the
      response;
    * turns every ``<record>`` entry into an article-stage task row
      (``next_dicts``) carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied into every scheduled row; task_tag_next is the
    # tag of the follow-up (article) stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # <totalpage> holds the total number of list pages.
        page_info = res.xpath('//totalpage/text()').extract_first()
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            # INSERT IGNORE, so re-runs do not duplicate scheduled rows.
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy(): sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/@title'
        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # The scheduled row belongs to the next (article) stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Alternate layout: title lives in a zctitle div.
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            if url is None:
                continue
            elif 'htm' not in url:
                # Only .htm/.html/.shtml detail pages are crawled.
                continue

            # url_before = "http://www.sanya.gov.cn"
            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            # rawid = detail-page file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99338'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath("./span[@class='bt-data-time']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxjstarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: no extra scheduling; parsing is done in the ETL step."""
    return DealModel()


def policy_jxjstarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangxi housing-department article pages.

    Parses the fetched detail page, builds one ``policy_latest`` row plus
    one ``policy_fulltext_latest`` row, and writes attachment info (if
    any) back onto the task row keyed by (rawid, task_tag, task_name).

    Raises a bare Exception when pub_date or the full-text container
    cannot be found, so the task is marked failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page heading first, then any <h1>, then the list-stage title.
    title = ''.join(res.xpath('//div[@class="newsinfo"]/h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the date shown on the detail page.
        pub_date_info = ''.join(res.xpath('//div[@class="L"]/span/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception
    # Metadata cells are located via their (split) label characters in the
    # public-information table, e.g. 文号 / 索引号 / 主题分类 / 成文日期 / 发文机关.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "metalist")]//li//span[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::td/text()').extract()).strip()

    # Organ names starting with 省 ("province") get the province prefixed.
    if organ.startswith('省'):
        organ = '江西' + organ

    # Full-text container: injected wrapper first, then the legacy #zoom div.
    fulltext_xpath = '//div[@class="____article_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99338'  # sub-database id for this source
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXJST"
    zt_provider = "jxjstgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath1 = '//div[contains(@class, "m-relation")]'
    # fulltext_xpath2 = '//div[contains(@class, "m-detailright")]'
    # Attachments found inside the full-text container are written back
    # onto the task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


# 江西省卫生健康委员会 (Jiangxi Provincial Health Commission)
def policy_hcjiangxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangxi Health Commission policy site.

    Reads the fetched list page (an XML-like ``<recordset>`` document)
    from ``para_dicts["data"]["1_1"]`` and:

    * on page 1, schedules the remaining list pages (2..totalpage) as new
      list-stage tasks (``befor_dicts``), using ``<totalpage>`` from the
      response;
    * turns every ``<record>`` entry into an article-stage task row
      (``next_dicts``) carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied into every scheduled row; task_tag_next is the
    # tag of the follow-up (article) stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # <totalpage> holds the total number of list pages.
        page_info = res.xpath('//totalpage/text()').extract_first()
        if page_info:
            # max_count = re.findall('pageCount:(\d+),', page_info)
            # if not max_count:
            #     max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(page_info) if page_info else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            # INSERT IGNORE, so re-runs do not duplicate scheduled rows.
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy(): sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//recordset/record')
        url_path = './/a/@href'
        title_path = './/a/@title'
        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            # The scheduled row belongs to the next (article) stage.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Alternate layout: title lives in a zctitle div.
                title = li.xpath("./a/div[@class='zctitle']/h3/text()").get()
            if url is None:
                continue
            elif 'htm' not in url:
                # Only .htm/.html/.shtml detail pages are crawled.
                continue

            # url_before = "http://www.sanya.gov.cn"
            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            # rawid = detail-page file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99339'
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath("./span[@class='bt-data-time']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hcjiangxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: no extra scheduling; parsing is done in the ETL step."""
    return DealModel()


def policy_hcjiangxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangxi Health Commission article pages.

    Parses the fetched detail page, builds one ``policy_latest`` row plus
    one ``policy_fulltext_latest`` row, and writes attachment info (if
    any) back onto the task row keyed by (rawid, task_tag, task_name).

    Raises a bare Exception when no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page heading, then any <h1>, then the ArticleTitle meta tag,
    # then the list-stage title.
    title = ''.join(res.xpath('//div[@class="zw_title"]/p[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no_before = res.xpath('//table[contains(@class, "m-detailtb")]//th[contains(text(),"文") and contains(text(),"号")]')

    # Metadata cells are located via their (split) label characters, e.g.
    # 文号 / 索引号 / 主题分类 / 成文日期 / 发文机关.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "metalist")]//li//span[contains(string(),"文") and contains(string(),"号")]/ancestor::td/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"索") and contains(string(),"引")]/ancestor::td/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/ancestor::td/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::td/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "xxgkTitle")]//td//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::td/text()').extract()).strip()

    # Organ names starting with 省 ("province") get the province prefixed.
    if organ.startswith('省'):
        organ = '江西' + organ

    # Full-text container: injected wrapper first, then the legacy #zoom
    # div.  Note the extract is repeated after the fallback xpath is
    # chosen (harmless re-extraction when the first xpath matched).
    fulltext_xpath = '//div[@class="____article_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    # if not fulltext:
    #     fulltext_xpath = '//div[contains(@class, "m-detailleft")]'
    #     fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # fulltext_xpath = '//div[contains(@class, "j-fontContent")]'
        fulltext_xpath = '//div[@id="zoom"]'
        # fulltext_xpath = '//ucapcontent'

        # fulltext_xpath2 = '//div[@class="is-downlist"]'
        # fulltext2 = res.xpath(fulltext_xpath2).extract_first()
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99339'  # sub-database id for this source
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HCJIANGXI"
    zt_provider = "hcjiangxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # fulltext_xpath1 = '//div[contains(@class, "m-relation")]'
    # fulltext_xpath2 = '//div[contains(@class, "m-detailright")]'
    # Attachments found inside the full-text container are written back
    # onto the task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    # file_info2 = get_file_info(data, res, f'({fulltext_xpath1})')
    # file_info3 = get_file_info(data, res, f'({fulltext_xpath2})')
    # file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    # print(result)
    return result


#   江西省南昌市 (Nanchang, Jiangxi — municipal government)
def policy_nclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Nanchang JSON list API.

    The fetched page is a JSON document.  On page 1, every list page
    (1..ceil(allRow/15), 15 entries per page) is scheduled as a
    list-stage task; every entry of the current page becomes an
    article-stage task.
    """
    deal = DealModel()
    para_dicts = callmodel.para_dicts
    stage_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every scheduled row; task_tag_next marks the
    # follow-up (article) stage.
    row_template = {"task_name": callmodel.sql_model.task_name,
                    "task_tag": callmodel.sql_model.task_tag,
                    "task_tag_next": stage_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return deal

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    current_page = int(callmodel.sql_model.page_index)
    if current_page == 1:
        allrow = payload['data']['page']['allRow']
        total_page = math.ceil(allrow / 15)
        base_row = deal_sql_dict(callmodel.sql_model.dict())
        pending = DealInsertModel()
        # INSERT IGNORE so re-runs do not duplicate scheduled rows.
        pending.insert_pre = CoreSqlValue.insert_ig_it
        # Parsed but unused; retained from the original implementation.
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(1, total_page + 1):
            base_row["page"] = total_page
            base_row["page_index"] = page_no
            base_row["list_json"] = callmodel.sql_model.list_json
            # copy(): base_row is mutated on every iteration.
            pending.lists.append(base_row.copy())
        deal.befor_dicts.insert.append(pending)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    base_url = 'http://www.nc.gov.cn/ncszf/qtygwj/zfxxgk_list_tj.shtml'
    for entry in payload['data']['page']['list']:
        row = row_template.copy()
        row["task_tag"] = row.pop("task_tag_next")
        row["rawid"] = entry['MANUSCRIPT_ID']
        row["sub_db_id"] = '99340'
        detail = dict()
        detail["url"] = parse.urljoin(base_url, entry['URL_COMP'])
        detail["title"] = entry['TITLE']
        detail["pub_date"] = entry['PUBLISHED_TIME_FORMATED']
        row["article_json"] = json.dumps(detail, ensure_ascii=False)
        articles.lists.append(row)
    deal.next_dicts.insert.append(articles)

    return deal


def policy_nclist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Nanchang static HTML list pages.

    On page 1 the total page count is read from the client-side
    ``createPageHTML('page_div', <total>, ...)`` pager call and one
    list-stage task is scheduled per page; every list ``<li>`` becomes an
    article-stage task.

    Fixes over the previous revision:
      * regex patterns are raw strings (``\\(`` / ``\\.`` in plain
        strings are invalid escape sequences);
      * entries whose last URL segment has no dot no longer raise
        IndexError when deriving the rawid — they are skipped;
      * missing title/pub_date text nodes no longer raise
        AttributeError — they fall back to "".
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every scheduled row; task_tag_next marks the
    # follow-up (article) stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            # INSERT IGNORE so re-runs do not duplicate scheduled rows.
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f'{list_json["page_info"]}_{page}'}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy(): sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[contains(@class,"pageList")]/ul/li|//ul[@class="pageList newsList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h4/a/@href|a/@href').extract_first()
            base_url = f'http://www.nc.gov.cn/ncszf{callmodel.sql_model.list_rawid}/2021_nav_list.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = detail-page file name without its extension; skip
            # entries whose last path segment has no dot at all.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99340'
            article_json["url"] = url
            # extract_first() may return None for malformed entries.
            article_json["title"] = (li.xpath('h4/a/text()|a/span/text()|a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('h4/span/text()|span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ncarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: no extra scheduling; parsing is done in the ETL step."""
    return DealModel()


def policy_ncarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Nanchang article pages.

    Parses the fetched detail page, builds one ``policy_latest`` row plus
    one ``policy_fulltext_latest`` row, and writes attachment info (if
    any) back onto the task row keyed by (rawid, task_tag, task_name).

    Fix: removed a dead store — ``title`` was read from ``article_json``
    and then unconditionally overwritten two lines later.

    Raises a bare Exception when no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from the page, falling back to the list-stage title.
    title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells: label <font> followed by the value <div>.
    pub_no = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"文件编号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"主题分类")]/following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"生成日期")]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"有效性")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"发布机构")]/following::div[1]/text()').extract()).strip()
    # Organ names starting with 市 ("city") get the city prefixed.
    if organ.startswith('市'):
        organ = '南昌' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99340'  # sub-database id for this source
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NC"
    zt_provider = "ncgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container are written back
    # onto the task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省九江市 (Jiujiang, Jiangxi — municipal government)
def policy_jiujianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiujiang site.

    The site exposes no reliable page count here, so on the first run
    (page_index == 0) pages 1..99 are scheduled unconditionally; every
    list entry becomes an article-stage task.

    Fix: title/pub_date text nodes may be absent, in which case
    ``.get()`` returns None; previously this raised AttributeError on
    ``.strip()`` / ``.replace()``.  Missing values now fall back to "",
    matching the guarding style of the sibling list callbacks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every scheduled row; task_tag_next marks the
    # follow-up (article) stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        total_page = 100  # no pager info available; crawl a fixed window
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            # INSERT IGNORE so re-runs do not duplicate scheduled rows.
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}", "list_rawid_alt": list_json["list_rawid_alt"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # copy(): sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Three known list layouts; pick whichever matches.
        pub_date_path = ""
        li_list = res.xpath('//div[@class="list col-md-9"]//tr/td[@class="td1"]')
        url_path = './a/@href'
        title_path = './a/text()'
        if not li_list:
            li_list = res.xpath('//ul[@class="info-list"]/li')
            pub_date_path = './span/text()'
        if not li_list:
            li_list = res.xpath('//div[@class="contentRight"]/ul/li')
            pub_date_path = './span/text()'
        list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if url is None:
                continue
            elif 'htm' not in url:
                # Only .htm/.html/.shtml detail pages are crawled.
                continue

            url_before = f"https://www.jiujiang.gov.cn/{list_json['list_rawid_alt']}/{list_json['page_info']}.html"
            rawid_list = url.split('/')
            # rawid = detail-page file name without its extension.
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99341'
            article_json["url"] = parse.urljoin(url_before, url).strip()
            # .get() may return None when the text node is missing.
            article_json["title"] = (title or "").strip()
            if pub_date_path != "":
                pub_date = li.xpath(pub_date_path).get() or ""
            else:
                pub_date = ""
            article_json["pub_date"] = pub_date.replace('发布时间：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiujiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: no extra scheduling; parsing is done in the ETL step."""
    return DealModel()


def policy_jiujiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiujiang article pages.

    Parses the fetched detail page, builds one ``policy_latest`` row plus
    one ``policy_fulltext_latest`` row, and writes attachment info (if
    any) back onto the task row keyed by (rawid, task_tag, task_name).

    Fix: ``written_date`` was extracted from the page but never
    persisted; it is now stored on the row (cleaned), matching every
    sibling ``*_etl`` callback.

    Raises a bare Exception when no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # article_json was filled by the list-stage callback (url/title/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    # Title: main heading, then legacy layout heading, then list-stage title.
    title = ''.join(res.xpath('//p[@class="mainTitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="zwy"]/h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata located via split label characters inside the infoType block.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"文") and contains(string(),"号")]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"索") and contains(string(),"取")]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"成") and contains(string(),"生") and contains(string(),"日")]/text()').extract()).strip()

    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"时") and contains(string(),"效")]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "infoType")]//li//span[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/text()').extract()).strip()

    # Publication date comes from the page itself, not the list stage.
    pub_date = ''.join(res.xpath(
        '//span[contains(@class, "infoTypeTime")]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date)
    pub_year = pub_date[:4]

    # Organ names starting with 市 ("city") get the city prefixed.
    if organ.startswith('市'):
        organ = '九江' + organ

    # Full-text container: current layout first, then the legacy one.
    fulltext_xpath = '//div[contains(@class, "zwy-cont")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="artibody"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99341'  # sub-database id for this source
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIUJIANG"
    zt_provider = "jiujianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # Previously extracted but dropped; persist like the sibling callbacks.
    data['written_date'] = clean_pubdate(written_date)

    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachments found inside the full-text container are written back
    # onto the task row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省上饶市
def policy_zgsrlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shangrao (www.zgsr.gov.cn).

    On the first list page, fans out insert rows for every remaining page;
    on every page, queues one article task per entry of the ``doclist`` list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # The total page count is embedded in a createPageHTML(...) JS call.
        counts = re.findall("createPageHTML\('page_div',(\d+),", html)
        total_page = int(counts[0]) if counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages.
            pages_model = DealInsertModel()
            pages_model.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"},
                                                   ensure_ascii=False)
                pages_model.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(pages_model)

        articles_model = DealInsertModel()
        articles_model.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        for item in res.xpath('//ul[@id="doclist"]/li'):
            url = item.xpath('./div[@class="zcwj-wj"]/a/@href').get()
            title = item.xpath('./div[@class="zcwj-wj"]/a/text()').get()
            if url is None or 'htm' not in url:
                continue
            row = base_row.copy()
            row["task_tag"] = row.pop("task_tag_next")
            # rawid is the document file name stripped of its extension.
            row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            row["sub_db_id"] = '99342'
            pub_date = item.xpath('span[@class="span_date"]/text()').get()
            article_json = {
                "url": "http://www.zgsr.gov.cn" + url,
                "title": title,
                "pub_date": pub_date.replace('发布时间：', '').strip(),
            }
            row["article_json"] = json.dumps(article_json, ensure_ascii=False)
            articles_model.lists.append(row)
        result.next_dicts.insert.append(articles_model)

    return result


def policy_zgsrarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Shangrao; all parsing happens in the ETL step."""
    return DealModel()


def policy_zgsrarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shangrao (www.zgsr.gov.cn) policy articles.

    Extracts policy metadata and the full text from the fetched HTML and
    stages rows for ``policy_latest`` / ``policy_fulltext_latest``; also
    records attachment info via ``get_file_info``.

    Raises:
        Exception: when the <ucapcontent> full-text container is missing,
            so the task is marked failed.
    """
    result = EtlDealModel()

    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=callmodel.para_dicts['data']['1_1']['html'])

    def _field(xpath):
        # Concatenate every matching text node, then trim whitespace.
        return ''.join(res.xpath(xpath).extract()).strip()

    # Title fallbacks: <ucaptitle> -> <h1> -> the list-page title.
    title = (_field('//ucaptitle//text()')
             or _field('//h1//text()')
             or article_json['title'].strip())

    pub_no = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"文") and contains(string(),"号")]/following-sibling::span/text()')
    index_no = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"索") and contains(string(),"引")]/following-sibling::span/text()')
    subject = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::span/text()')
    written_date = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::span/text()')
    legal_status = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"时") and contains(string(),"效")]/following-sibling::span/text()')
    organ = _field(
        '//ul[contains(@class, "ztfl")]//li//span[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::span/text()')

    if organ.startswith('市'):
        # Municipal organs are published without the city name; prepend it.
        organ = '上饶' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99342'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "ZGSR", "zgsrgovpolicy")
    print(lngid)

    data.update({
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    })

    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    result.save_data = [
        {'table': 'policy_latest', 'data': data},
        {'table': 'policy_fulltext_latest', 'data': full_text_data},
    ]

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    update_model = DealUpdateModel()
    update_model.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    update_model.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(update_model)
    return result


# 江西省抚州市 (Fuzhou City, Jiangxi Province)
def policy_jxfzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fuzhou, Jiangxi (www.jxfz.gov.cn).

    The site serves an XML feed: page count in <totalpage>, entries as
    <recordset>/<record>. Page 1 fans out the remaining pages; every page
    queues one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        total_text = res.xpath('//totalpage/text()').extract_first()
        total_page = int(total_text) if total_text else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            pages_model = DealInsertModel()
            pages_model.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                                   ensure_ascii=False)
                pages_model.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(pages_model)

        articles_model = DealInsertModel()
        articles_model.insert_pre = CoreSqlValue.insert_ig_it
        for record in res.xpath('//recordset/record'):
            url = record.xpath('./div[2]/a/@href').get()
            title = record.xpath('./div[2]/a/p[1]/text()').get()
            if title is None:
                title = record.xpath("./a/text()").get()
            if url is None:
                url = record.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # NOTE: fallback hrefs (from ./a/@href) bypass this filter —
                # this preserves the original branch structure on purpose.
                continue
            row = base_row.copy()
            row["task_tag"] = row.pop("task_tag_next")
            row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            row["sub_db_id"] = '99343'
            url_before = "http://www.jxfz.gov.cn" if url.startswith("/") else ""
            pub_date_before = (record.xpath("./span[@class='bt-data-time']/text()").get()
                               or record.xpath(".//b/text()").get()
                               or "")
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            row["article_json"] = json.dumps(article_json, ensure_ascii=False)
            articles_model.lists.append(row)
        result.next_dicts.insert.append(articles_model)

    return result


def policy_jxfzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Fuzhou, Jiangxi; parsing happens in the ETL step."""
    return DealModel()


def policy_jxfzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fuzhou, Jiangxi (www.jxfz.gov.cn) policy articles.

    Extracts policy metadata and the full text from the fetched HTML and
    stages rows for ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when no full-text container can be located, so the task
            is marked failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _field(xpath):
        # Concatenate every matching text node, then trim whitespace.
        return ''.join(res.xpath(xpath).extract()).strip()

    # Title fallbacks: <p id="title"> -> <h1> -> the list-page title.
    title = (_field('//p[@id="title"]/text()')
             or _field('//h1//text()')
             or article_json['title'].strip())

    pub_no = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"文") and contains(string(),"号")]/ancestor::p/text()')
    index_no = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"索") and contains(string(),"引")]/ancestor::p/text()')
    subject = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/ancestor::p/text()')
    written_date = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::p/text()')
    legal_status = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"有") and contains(string(),"效")]/ancestor::p/text()')
    organ = _field(
        '//ul[contains(@class, "wh")]//li//b[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/ancestor::p/text()')

    if organ.startswith('市'):
        # Municipal organs are published without the city name; prepend it.
        organ = '抚州' + organ

    # BUGFIX: the fallback extraction used to run unconditionally because the
    # second res.xpath(...) call sat outside the `if`; only re-extract when
    # the primary container is missing.
    fulltext_xpath = '//div[@class="____article_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99343'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXFZ"
    zt_provider = "jxfzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    # BUGFIX: legal_status was extracted above but never stored; sibling
    # callbacks (e.g. Shangrao, Ji'an) persist it, so do the same here.
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省宜春市 (Yichun City, Jiangxi Province)
def policy_yichunlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yichun (www.yichun.gov.cn).

    Page 1 fans out insert rows for the remaining list pages; every page
    queues one article task per ``pageList`` entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # The total page count is embedded in a createPageHTML(...) JS call.
        counts = re.findall("createPageHTML\('page_div',(\d+),", html)
        total_page = int(counts[0]) if counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            pages_model = DealInsertModel()
            pages_model.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"},
                                                   ensure_ascii=False)
                pages_model.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(pages_model)

        articles_model = DealInsertModel()
        articles_model.insert_pre = CoreSqlValue.insert_ig_it
        entries = res.xpath('//div[contains(@class, "pageList")]/ul/li')
        if not entries:
            entries = res.xpath('//ul[contains(@class, "pageList")]/li')
        for entry in entries:
            url = entry.xpath('./h4/a[1]/@href').get()
            title = entry.xpath('./h4/a[1]/@title').get()
            if title is None:
                title = entry.xpath("./a/@title").get()
            if url is None:
                url = entry.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # NOTE: fallback hrefs bypass this filter — preserves the
                # original branch structure on purpose.
                continue

            row = base_row.copy()
            row["task_tag"] = row.pop("task_tag_next")
            row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            row["sub_db_id"] = '99344'
            url_before = "https://www.yichun.gov.cn" if url.startswith("/") else ""
            pub_date_before = (entry.xpath("./span[@class='time']/text()").get()
                               or entry.xpath(".//b/text()").get()
                               or "")
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            row["article_json"] = json.dumps(article_json, ensure_ascii=False)
            articles_model.lists.append(row)
        result.next_dicts.insert.append(articles_model)

    return result


def policy_yichunarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Yichun; parsing happens in the ETL step."""
    return DealModel()


def policy_yichunarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yichun (www.yichun.gov.cn) policy articles.

    Extracts policy metadata and the full text from the fetched HTML and
    stages rows for ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when no publish date or no full-text container can be
            located, so the task is marked failed.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _field(xpath):
        # Concatenate every matching text node, then trim whitespace.
        return ''.join(res.xpath(xpath).extract()).strip()

    # Title fallbacks: <ucaptitle> -> <h1> -> the list-page title.
    title = (_field('//ucaptitle/text()')
             or _field('//h1//text()')
             or article_json['title'].strip())
    if not pub_date:
        # Fall back to the <publishtime> element embedded in the page.
        pub_date = clean_pubdate(_field('//publishtime/text()'))
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception

    pub_no = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"文") and contains(string(),"号")]/following-sibling::div/text()')
    index_no = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"索") and contains(string(),"引")]/following-sibling::div/text()')
    subject = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::div/text()')
    written_date = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::div/text()')
    legal_status = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"时") and contains(string(),"效")]/following-sibling::div/text()')
    organ = _field(
        '//div[contains(@class, "meta-data")]//dd//label[contains(string(),"发") and contains(string(),"机") and contains(string(),"关")]/following-sibling::div/text()')

    if organ.startswith('市'):
        # BUGFIX: the prefix used to be '抚州' (Fuzhou), copied from the jxfz
        # callback; this is the Yichun site (yichun.gov.cn / product YICHUN),
        # so prepend '宜春'.
        organ = '宜春' + organ

    # BUGFIX: the fallback extraction used to run unconditionally because the
    # second res.xpath(...) call sat outside the `if`; only re-extract when
    # the primary container is missing.
    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99344'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YICHUN"
    zt_provider = "yichungovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    # BUGFIX: legal_status was extracted above but never stored; sibling
    # callbacks (e.g. Shangrao, Ji'an) persist it, so do the same here.
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江西省吉安市 (Ji'an City, Jiangxi Province)
def policy_jianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ji'an (www.jian.gov.cn), which serves JSON.

    Page 1 computes the page count from the JSON ``total`` field and fans
    out all list pages; every page queues one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The endpoint wraps a JSON object in surrounding text; grab the
        # outermost brace-delimited span and parse it.
        payload = json.loads(re.findall('\{.*\}', para_dicts["data"]["1_1"]['html'])[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            total_page = math.ceil(payload['total'] / 20)  # fixed page size of 20
            pages_model = DealInsertModel()
            pages_model.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            # Parsed only to validate list_json (result intentionally unused).
            json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                pages_model.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(pages_model)

        articles_model = DealInsertModel()
        articles_model.insert_pre = CoreSqlValue.insert_ig_it
        base_url = 'https://www.jian.gov.cn/news-list-gonggaogongshi.html'
        for entry in payload['data']:
            url = parse.urljoin(base_url, entry['url'])
            if 'jian' not in url or 'htm' not in url:
                continue
            row = base_row.copy()
            row["task_tag"] = row.pop("task_tag_next")
            # rawid is the file-name stem before the first dot.
            row["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            row["sub_db_id"] = '99345'
            article_json = {
                "url": url,
                "title": entry['title'],
                "pub_date": entry['inputtime'],
            }
            row["article_json"] = json.dumps(article_json, ensure_ascii=False)
            articles_model.lists.append(row)
        result.next_dicts.insert.append(articles_model)

    return result


def policy_jianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Ji'an; parsing happens in the ETL step."""
    return DealModel()


def policy_jianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ji'an (www.jian.gov.cn) policy articles.

    Supports two page layouts (a <div id="pre_1"> metadata block and an
    older <table class="table"> layout) and stages rows for
    ``policy_latest`` / ``policy_fulltext_latest``.

    Raises:
        Exception: when no full-text container can be located, so the task
            is marked failed.
    """
    result = EtlDealModel()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    def _field(xpath):
        # Concatenate every matching text node, then trim whitespace.
        return ''.join(res.xpath(xpath).extract()).strip()

    if 'id="pre_1"' in html:
        # Newer layout: labelled <span>s inside <div id="pre_1">.
        pub_no = _field('//div[@id="pre_1"]//strong[contains(text(),"文件编号")]/parent::span[1]/text()')
        index_no = _field('//div[@id="pre_1"]//strong[contains(text(),"索 引 号")]/parent::span[1]/text()')
        subject = _field('//div[@id="pre_1"]//strong[contains(text(),"信息类别")]/parent::span[1]/text()')
        written_date = _field('//div[@id="pre_1"]//strong[contains(text(),"生成日期")]/parent::span[1]/text()')
        legal_status = _field('//div[@id="pre_1"]//strong[contains(text(),"有 效 性")]/parent::span[1]/text()')
        organ = _field('//div[@id="pre_1"]//strong[contains(text(),"责任部门")]/parent::span[1]/text()')
    else:
        # Older layout: a plain metadata table (no written date available).
        pub_no = _field('//table[@class="table"]//td[contains(text(),"文件编号")]/following::td[1]/text()')
        index_no = _field('//table[@class="table"]//td[contains(text(),"索 引 号")]/following::td[1]/text()')
        subject = _field('//table[@class="table"]//td[contains(text(),"分 类")]/following::td[1]/text()')
        written_date = ''
        legal_status = _field('//table[@class="table"]//td[contains(text(),"有效性")]/following::td[1]/text()')
        organ = _field('//table[@class="table"]//td[contains(text(),"责任部门")]/following::td[1]/text()')

    if organ.startswith('市'):
        # Municipal organs are published without the city name; prepend it.
        organ = '吉安' + organ

    fulltext_xpath = '//div[@class="xxgk_content"]|//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99345'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "JIAN", "jiangovpolicy")
    print(lngid)

    data.update({
        'title': title,
        'provider_url': provider_url,
        'pub_date': clean_pubdate(pub_date),
        'pub_year': pub_year,
        'pub_no': pub_no,
        'organ': organ,
        'index_no': index_no,
        'written_date': clean_pubdate(written_date),
        'subject': subject,
        'legal_status': legal_status,
    })

    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    result.save_data = [
        {'table': 'policy_latest', 'data': data},
        {'table': 'policy_fulltext_latest', 'data': full_text_data},
    ]

    # Persist attachment info (or an empty JSON object) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    update_model = DealUpdateModel()
    update_model.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    update_model.where.update({"rawid": callmodel.sql_model.rawid,
                               "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(update_model)
    return result


# 江西省赣州市 (Ganzhou City, Jiangxi Province)
def policy_ganzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ganzhou (www.ganzhou.gov.cn), variant 1.

    Page 1 fans out insert rows for the remaining list pages; every page
    queues one article task per ``pageList`` entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        # The total page count is embedded in a createPageHTML(...) JS call.
        counts = re.findall("createPageHTML\('page_div',(\d+),", html)
        total_page = int(counts[0]) if counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            pages_model = DealInsertModel()
            pages_model.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"},
                                                   ensure_ascii=False)
                pages_model.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(pages_model)

        articles_model = DealInsertModel()
        articles_model.insert_pre = CoreSqlValue.insert_ig_it
        entries = res.xpath('//div[contains(@class, "pageList")]/li')
        if not entries:
            entries = res.xpath('//ul[contains(@class, "pageList")]/li')
        for entry in entries:
            url = entry.xpath('./h4/a[1]/@href').get()
            title = entry.xpath('./h4/a[1]/@title').get()
            if title is None:
                title = entry.xpath("./a/@title").get()
            if url is None:
                url = entry.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # NOTE: fallback hrefs bypass this filter — preserves the
                # original branch structure on purpose.
                continue

            row = base_row.copy()
            row["task_tag"] = row.pop("task_tag_next")
            row["rawid"] = url.split('/')[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            row["sub_db_id"] = '99346'
            url_before = "https://www.ganzhou.gov.cn" if url.startswith("/") else ""
            pub_date_before = (entry.xpath("./h4/span[@class='time']/text()").get()
                               or entry.xpath(".//b/text()").get()
                               or "")
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            row["article_json"] = json.dumps(article_json, ensure_ascii=False)
            articles_model.lists.append(row)
        result.next_dicts.insert.append(articles_model)

    return result


def policy_ganzhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ganzhou municipal government policy pages (layout 2).

    On page 1, fans out one pagination task per remaining list page; then
    extracts every article's url/title/pub_date from the current page into
    next-stage insert tasks.

    :param callmodel: platform callback context carrying the fetched html,
        the originating sql row and the redis task info.
    :return: DealModel with befor_dicts (pagination) and next_dicts (articles).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Build the selector once; the same instance serves both the page-count
        # probe and the list extraction below.
        res = Selector(text=html)
        # Total page count is embedded in a JS call: createPageHTML('page_div', N, ...)
        page_info = re.findall(r"createPageHTML\('page_div',(\d+),", html)
        total_page = int(page_info[0]) if page_info else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info_base = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info_base}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Two known list layouts; try the primary one first.
        li_list = res.xpath('//div[contains(@class, "pageList")]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "pageList")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./span[@class='bt']/a/@title").get()
            if url is None:
                url = li.xpath("./span[@class='bt']/a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip non-article links (e.g. attachments, anchors).
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99346'
            if url.startswith("/"):
                url_before = "https://www.ganzhou.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Some <li> entries carry no title attribute at all; store an empty
            # title rather than crashing on None.strip().
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath("./span[@class='time']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span[@class='bt']/p[@class='zcwjtime']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ganzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ganzhou; no extra scheduling is required."""
    return DealModel()


def policy_ganzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ganzhou policy article pages.

    Extracts title, metadata (document number, index number, subject,
    written date, issuing organ) and the full text, builds the
    policy_latest / policy_fulltext_latest rows, and records attachment
    info back on the source task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: prefer the <ucaptitle> tag, then <h1>, then the list-page title.
    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(*chars):
        # One metadata field from the "meta-data" dl: label containing all
        # of *chars*, value in the following sibling <div>.
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        xpath = f'//div[contains(@class, "meta-data")]//dd//label[{cond}]/following-sibling::div/text()'
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    subject = _meta("主", "分", "类")
    written_date = _meta("成", "文", "日")
    # NOTE(review): legal_status is extracted but never written into `data`
    # below — confirm whether policy_latest expects it.
    legal_status = _meta("时", "效")
    organ = _meta("发", "机", "关")

    # Pages abbreviate the city prefix, e.g. "市人民政府" -> "赣州市人民政府".
    if organ.startswith('市'):
        organ = '赣州' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Older page templates keep the body in <div id="zoom">.
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99346'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GANZHOU"
    zt_provider = "ganzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the fulltext container go back onto the
    # source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 江西省新余市
# 江西省新余市 (Xinyu, Jiangxi)
def policy_xinyulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xinyu municipal government policy pages.

    The page exposes no reliable total-page marker, so a fixed crawl depth
    of 50 pages is scheduled from page 1; then every article on the current
    page is extracted into next-stage insert tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Hard-coded depth: the site's pagination count is not parseable here.
        total_page = 50
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule the remaining pages as new list tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two known list layouts; try the primary one first.
        li_list = res.xpath('//div[contains(@class, "list_right_news")]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        url_path = './a/@href'
        title_path = './a/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/@title").get()
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip non-article links (e.g. attachments, anchors).
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99349'
            if url.startswith("/"):
                url_before = "http://www.xinyu.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against entries with no link text and no title attribute.
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xinyuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xinyu; no extra scheduling is required."""
    return DealModel()


def policy_xinyuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xinyu policy article pages.

    Extracts title, metadata (document number, index number, subject,
    written date, issuing organ) and the full text, builds the
    policy_latest / policy_fulltext_latest rows, and records attachment
    info back on the source task row.

    :raises Exception: when no fulltext container can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: prefer the <ucaptitle> tag, then <h1>, then the list-page title.
    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(*chars):
        # One metadata field from the "meta-data" dl: label containing all
        # of *chars*, value in the following sibling <div>.
        cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
        xpath = f'//div[contains(@class, "meta-data")]//dd//label[{cond}]/following-sibling::div/text()'
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    subject = _meta("主", "分", "类")
    written_date = _meta("成", "文", "日")
    # NOTE(review): legal_status is extracted but never written into `data`
    # below — confirm whether policy_latest expects it.
    legal_status = _meta("时", "效")
    organ = _meta("发", "机", "构")

    # Pages abbreviate the city prefix, e.g. "市人民政府" -> "新余市人民政府".
    if organ.startswith('市'):
        organ = '新余' + organ

    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Older page templates keep the body in <div id="zoom">.
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99349'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XINYU"
    zt_provider = "xinyugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the fulltext container go back onto the
    # source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江西省鹰潭市
def policy_yingtanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.yingtan.gov.cn/col/col50/index.html?uid=495&pageNum=3'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99350'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[@class="bt-data-time"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yingtanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Yingtan's zfxxgk_zdgkc HTML list layout.

    Reads the total page count from the pagination text, schedules one task
    per page, then extracts each article's url/title/pub_date into
    next-stage insert tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pagination text reads: ";共&nbsp;N&nbsp;页" (N pages in total).
        max_count = re.findall(r';共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.yingtan.gov.cn/col/col13446/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name without extension; skip malformed names with no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99350'
            article_json["url"] = url
            # Guard against entries without link text / timestamp.
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('b/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yingtanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yingtan; no extra scheduling is required."""
    return DealModel()


def policy_yingtanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yingtan policy article pages.

    Extracts title, metadata from the "pcxx" info table and the full text,
    builds the policy_latest / policy_fulltext_latest rows, and records
    attachment info back on the source task row.

    :raises Exception: when the fulltext container (#zoom) is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the in-page title element, fall back to the list title.
    title = ''.join(res.xpath('//p[@id="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="pcxx"]//span[contains(text(),"文") and contains(text(),"号")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="pcxx"]//span[contains(text(),"索")]/parent::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="pcxx"]//span[contains(text(),"组配分类")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="pcxx"]//span[contains(text(),"成文日期")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="pcxx"]//span[contains(text(),"发文机关")]/parent::td[1]/text()').extract()).strip()
    # Pages abbreviate the city prefix, e.g. "市人民政府" -> "鹰潭市人民政府".
    if organ.startswith('市'):
        organ = '鹰潭' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99350'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YINGTAN"
    zt_provider = "yingtangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the fulltext container go back onto the
    # source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省发展和改革委员会
def policy_drcgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["total"]
        total_page = math.ceil(int(total) / 100)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = all_data["articles"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if url is None:

                continue
            elif 'htm' not in url:
                continue

            # url_before = "http://www.sanya.gov.cn"
            rawid_list = url.split('/')
            # rawid = "{}_{}".format(rawid_list[-2], rawid_list[-1].replace(".html", ""))
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99351'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            article_json["pub_date"] = clean_pubdate(
                li["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_drcgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Guangdong DRC HTML list pages (comlist1 layout).

    Derives the total page count from the "last page" pagination link, fans
    out one task per remaining page on page 1, then extracts each article's
    url/title/pub_date into next-stage insert tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Build the selector once; it serves both pagination and list parsing.
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The "last page" link looks like .../index_N.html; N is the page count.
        # The link is absent on single-page lists, so guard against None and
        # against a non-matching href (re.sub on None raised TypeError before).
        page_info_before = res.xpath("//div[@id='pages']//a[@class='last']/@href").extract_first() or ""
        last_page = re.search(r"index_(\d+)\.html", page_info_before)
        total_page = int(last_page.group(1)) if last_page else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Two known list layouts; try the primary one first.
        li_list = res.xpath('//ul[@class="comlist1 mt10 "]/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Skip non-article links (e.g. attachments, anchors).
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99351'
            if url.startswith("/"):
                url_before = "http://drc.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against entries with no title attribute and no link text.
            article_json["title"] = (title or "").strip()
            pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_drcgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for drc.gd.gov.cn; no extra handling is needed here."""
    return DealModel()


def policy_drcgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for drc.gd.gov.cn article pages.

    Extracts the title, the metadata table fields and the full-text HTML from
    the downloaded page, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    source row.

    Raises:
        Exception: when no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page markup first, then the title captured at list-crawl time.
    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each value sits in the <td> following its label cell.
    pub_no = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    keyword = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether the target schema should carry it.
    legal_status = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//div[contains(@class, "classify")]//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]/text()').extract()).strip()

    # Province-level organs are listed without the province prefix.
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: primary container first, then the fallback layout. The
    # original code re-evaluated the selected xpath a second time even when
    # the first attempt had already succeeded.
    fulltext_xpath = '//div[@id="content1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99351'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DRCGD"
    zt_provider = "drcgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container are stored on the
    # source row so they can be fetched later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省工业和信息化厅 (Guangdong Department of Industry and Information Technology)
def policy_gdiigdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gdii.gd.gov.cn.

    Derives the total page count from the pager's "last page" link, schedules
    the remaining list pages when handling page 1, and queues one article
    task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//div[@class='Pages']//a[@class='last']/@href").extract_first()
        # The pager link may be absent (single page) or not match the
        # "index_<n>.html" shape; default to one page instead of crashing.
        # Previously re.sub raised TypeError on None, and a non-matching
        # href flowed unchanged into int() (ValueError).
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="NewsList removebr"]/li')
        if not li_list:
            li_list = res.xpath('//ul[contains(@class, "pageList")]/li')
        url_path = './a[1]/@href'
        title_path = './a[1]/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                # Fall back to the anchor text; the original fallback
                # ('./a/@title') duplicated the primary xpath and could
                # never recover a title.
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99352'
            # Site-relative links are rooted at the site host.
            if url.startswith("/"):
                url_before = "http://gdii.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against anchors with neither @title nor text.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gdiigdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gdii.gd.gov.cn; nothing to post-process."""
    return DealModel()


def policy_gdiigdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gdii.gd.gov.cn article pages.

    Extracts the title, the metadata table fields and the full-text HTML from
    the downloaded page, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    source row.

    Raises:
        Exception: when no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page markup first, then the title captured at list-crawl time.
    title = ''.join(res.xpath('//ucaptitle/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each value sits in the <td> following its label cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]/text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]/text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]/text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]/text()').extract()).strip()

    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether the target schema should carry it.
    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]/text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]/text()').extract()).strip()

    # Province-level organs are listed without the province prefix.
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: primary container first, then the fallback layout. The
    # original code re-evaluated the selected xpath a second time even when
    # the first attempt had already succeeded.
    fulltext_xpath = '//ucapcontent'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99352'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GDIIGD"
    zt_provider = "gdiigdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['organ'] = organ
    data['keyword'] = keyword
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container are stored on the
    # source row so they can be fetched later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省发展和改革委员会
# NOTE(review): this label reads "Development and Reform Commission", but the
# function targets gdstc.gd.gov.cn — confirm which department this covers.
def policy_gdstcgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback (JSON API) for gdstc.gd.gov.cn.

    Computes the total page count from the API's reported total (100 items
    per page), fans out the remaining list pages on page 1, and queues one
    article task per returned entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    if "1_1" not in para_dicts["data"]:
        return result
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": task_info.task_tag_next}
    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the remaining list pages.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for item in all_data["articles"]:
        url = item["url"]
        title = item["title"]
        # Skip entries without a usable article link.
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        stem = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        row = dict(base_info)
        row["rawid"] = "{}_{}".format(parts[-2], stem)
        row["sub_db_id"] = '99353'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                item["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_gdstcgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (HTML pages) for gdstc.gd.gov.cn.

    Derives the total page count from the pager's "last page" link, schedules
    the remaining list pages when handling page 1, and queues one article
    task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//div[@class='page']//a[@class='last']/@href").extract_first()
        # The pager link may be absent (single page) or not match the
        # "index_<n>.html" shape; default to one page instead of crashing.
        # Previously re.sub raised TypeError on None, and a non-matching
        # href flowed unchanged into int() (ValueError).
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99353'
            # Site-relative links are rooted at the site host.
            if url.startswith("/"):
                url_before = "http://gdstc.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against anchors with neither @title nor text.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath("./span[@class='time']/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gdstcgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gdstc.gd.gov.cn; nothing to post-process."""
    return DealModel()


def policy_gdstcgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gdstc.gd.gov.cn article pages.

    Extracts the title, the metadata table fields and the full-text HTML from
    the downloaded page, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    source row.

    Raises:
        Exception: when no full-text container can be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page markup first, then the title captured at list-crawl time.
    title = ''.join(res.xpath('//h3[@class="zw-title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each value sits in the <td> following its label cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()

    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()

    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()

    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()

    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether the target schema should carry it.
    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()

    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()

    # Province-level organs are listed without the province prefix.
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: primary container first, then the fallback layout. The
    # original code re-evaluated the selected xpath a second time even when
    # the first attempt had already succeeded.
    fulltext_xpath = '//div[@class="article-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="zw"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99353'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GDSTCGD"
    zt_provider = "gdstcgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments found inside the full-text container are stored on the
    # source row so they can be fetched later.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省教育厅 (Guangdong Department of Education)
def policy_edugdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback (JSON API) for edu.gd.gov.cn.

    Computes the total page count from the API's reported total (100 items
    per page), fans out the remaining list pages on page 1, and queues one
    article task per returned entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    if "1_1" not in para_dicts["data"]:
        return result
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": task_info.task_tag_next}
    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the remaining list pages.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for item in all_data["articles"]:
        url = item["url"]
        title = item["title"]
        # Skip entries without a usable article link.
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        stem = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        row = dict(base_info)
        row["rawid"] = "{}_{}".format(parts[-2], stem)
        row["sub_db_id"] = '99354'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                item["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(row)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_edugdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback (HTML pages) for edu.gd.gov.cn.

    Derives the total page count from the pager's "last page" link, schedules
    the remaining list pages when handling page 1, and queues one article
    task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//div[@class='pages']//a[@class='last']/@href").extract_first()
        # The pager link may be absent (single page) or not match the
        # "index_<n>.html" shape; default to one page instead of crashing.
        # Previously re.sub raised TypeError on None, and a non-matching
        # href flowed unchanged into int() (ValueError).
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99354'
            # Site-relative links are rooted at the site host.
            if url.startswith("/"):
                url_before = "http://edu.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against anchors with neither @title nor text.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_edugdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing happens in the ETL step, so this just
    returns an empty DealModel."""
    return DealModel()


def _edugd_classify_field(res: Selector, *chars: str) -> str:
    """Return the text of the value <td> that follows the //div[@class="classify"]
    label <td> whose string contains every character in *chars*."""
    cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
    xpath = f'//div[@class="classify"]//td[{cond}]/following-sibling::td[1]//text()'
    return ''.join(res.xpath(xpath).extract()).strip()


def policy_edugdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Guangdong Department of Education (edu.gd.gov.cn) article pages.

    Parses the fetched HTML, extracts title / document metadata / fulltext,
    and emits rows for the policy_latest and policy_fulltext_latest tables,
    plus an update writing attachment info back to the source row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page <h3> first, then the document-number header, then the
    # title captured at list time.
    title = ''.join(res.xpath('//h3/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _edugd_classify_field(res, "文", "号")
    if pub_no == '无':  # "无" means "none" — normalize to empty
        pub_no = ''
    index_no = _edugd_classify_field(res, "索", "引")
    keyword = _edugd_classify_field(res, "主", "词")
    subject = _edugd_classify_field(res, "分", "类")
    written_date = _edugd_classify_field(res, "成", "文", "日")
    legal_status = _edugd_classify_field(res, "时", "效")  # NOTE(review): extracted but never saved — confirm intent
    organ = _edugd_classify_field(res, "发", "机", "构")
    if organ.startswith('省'):  # prefix bare "省..." organs with the province name
        organ = '广东' + organ

    # Fulltext container, with one alternate layout; extract once per xpath
    # (the original re-queried the primary xpath even after a hit).
    fulltext_xpath = '//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article-content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99354'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EDUGD"
    zt_provider = "edugdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when there is none) back to the source
    # row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省民政厅 (Guangdong Provincial Department of Civil Affairs, smzt.gd.gov.cn)
def policy_smztgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (JSON API) for the Civil Affairs site: schedules the
    remaining list pages on page 1 and queues one article task per entry."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: enqueue all remaining list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for entry in all_data["articles"]:
        url = entry["url"]
        title = entry["title"]
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        leaf = parts[-1]
        for ext in (".shtml", ".html", ".htm"):
            leaf = leaf.replace(ext, "")
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        di_model_next.lists.append({
            "task_name": info_dicts["task_name"],
            "task_tag": info_dicts["task_tag_next"],
            "rawid": f"{parts[-2]}_{leaf}",
            "sub_db_id": '99355',
            "article_json": json.dumps(article_json, ensure_ascii=False),
        })
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_smztgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (HTML pages) for smzt.gd.gov.cn.

    Derives the total page count from the pager's "last" link, schedules the
    remaining pages when on page 1, and queues one article task per <li>.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The pager "last" link looks like .../index_N.html. It can be absent
        # (single-page list) or differently shaped, so default to 1 instead
        # of crashing on None (re.sub TypeError) or a non-numeric int().
        page_info_before = res.xpath("//div[@class='page']//a[@class='last']/@href").extract_first()
        total_page = 1
        if page_info_before:
            m = re.search(r"index_(\d+)\.html", page_info_before)
            if m:
                total_page = int(m.group(1))

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="News_list"]/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if title is None:
                title = ""  # tolerate anchors with neither @title nor text
            if url is None:
                url = li.xpath("./a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99355'
            # Root-relative links are resolved against the site origin.
            url_before = "http://smzt.gd.gov.cn" if url.startswith("/") else ""
            article_json = {"url": url_before + url.replace("../", "/").replace("./", "/"),
                            "title": title.strip()}
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_smztgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing happens in the ETL step, so this just
    returns an empty DealModel."""
    return DealModel()


def _smztgd_meta_field(res: Selector, *chars: str) -> str:
    """Return the text of the value <td> that follows the metadata label <td>
    whose string contains every character in *chars*."""
    cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
    xpath = f'//td[{cond}]/following-sibling::td[1]//text()'
    return ''.join(res.xpath(xpath).extract()).strip()


def policy_smztgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Guangdong Department of Civil Affairs (smzt.gd.gov.cn) articles.

    Extracts title / document metadata / fulltext from the fetched HTML and
    emits rows for policy_latest and policy_fulltext_latest, plus an update
    writing attachment info back to the source row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page header first, then a fallback header, then the title
    # captured at list time.
    title = ''.join(res.xpath('//h1[@class="info_title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _smztgd_meta_field(res, "文", "号")
    index_no = _smztgd_meta_field(res, "索", "引")
    keyword = _smztgd_meta_field(res, "主", "词")
    subject = _smztgd_meta_field(res, "分", "类")
    written_date = _smztgd_meta_field(res, "成", "文", "日")
    legal_status = _smztgd_meta_field(res, "时", "效")  # NOTE(review): extracted but never saved — confirm intent
    organ = _smztgd_meta_field(res, "发", "机", "构")
    if organ.startswith('省'):  # prefix bare "省..." organs with the province name
        organ = '广东' + organ

    # Fulltext container, with one alternate layout; extract once per xpath
    # (the original re-queried the primary xpath even after a hit).
    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article-content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99355'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SMZTGD"
    zt_provider = "smztgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when there is none) back to the source
    # row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省财政厅 (Guangdong Provincial Department of Finance, czt.gd.gov.cn)
def policy_cztgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (JSON API) for the Department of Finance site: schedules
    the remaining list pages on page 1 and queues one article task per entry."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: enqueue all remaining list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for entry in all_data["articles"]:
        url = entry["url"]
        title = entry["title"]
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        leaf = parts[-1]
        for ext in (".shtml", ".html", ".htm"):
            leaf = leaf.replace(ext, "")
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        di_model_next.lists.append({
            "task_name": info_dicts["task_name"],
            "task_tag": info_dicts["task_tag_next"],
            "rawid": f"{parts[-2]}_{leaf}",
            "sub_db_id": '99356',
            "article_json": json.dumps(article_json, ensure_ascii=False),
        })
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (HTML pages) for czt.gd.gov.cn.

    Derives the total page count from the pager's "last" link, schedules the
    remaining pages when on page 1, and queues one article task per <li>.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The pager "last" link looks like .../index_N.html. It can be absent
        # (single-page list) or differently shaped, so default to 1 instead
        # of crashing on None (re.sub TypeError) or a non-numeric int().
        page_info_before = res.xpath("//div[@class='pages']//a[@class='last']/@href").extract_first()
        total_page = 1
        if page_info_before:
            m = re.search(r"index_(\d+)\.html", page_info_before)
            if m:
                total_page = int(m.group(1))

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="lists_ty"]/li')
        if not li_list:
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if title is None:
                title = ""  # tolerate anchors with neither @title nor text
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99356'
            # Root-relative links are resolved against the site origin.
            url_before = "http://czt.gd.gov.cn" if url.startswith("/") else ""
            article_json = {"url": url_before + url.replace("../", "/").replace("./", "/"),
                            "title": title.strip()}
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./div[@class='lists_time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing happens in the ETL step, so this just
    returns an empty DealModel."""
    return DealModel()


def _cztgd_meta_field(res: Selector, *chars: str) -> str:
    """Return the text of the value <td> that follows the metadata label <td>
    whose string contains every character in *chars*."""
    cond = " and ".join(f'contains(string(),"{c}")' for c in chars)
    xpath = f'//td[{cond}]/following-sibling::td[1]//text()'
    return ''.join(res.xpath(xpath).extract()).strip()


def policy_cztgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Guangdong Department of Finance (czt.gd.gov.cn) articles.

    Extracts title / document metadata / fulltext from the fetched HTML and
    emits rows for policy_latest and policy_fulltext_latest, plus an update
    writing attachment info back to the source row.

    Raises:
        Exception: when no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: document-number header first, then any <h1>, then the title
    # captured at list time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _cztgd_meta_field(res, "文", "号")
    index_no = _cztgd_meta_field(res, "索", "引")
    keyword = _cztgd_meta_field(res, "主", "词")
    subject = _cztgd_meta_field(res, "分", "类")
    written_date = _cztgd_meta_field(res, "成", "文", "日")
    legal_status = _cztgd_meta_field(res, "时", "效")  # NOTE(review): extracted but never saved — confirm intent
    organ = _cztgd_meta_field(res, "发", "机", "构")
    if organ.startswith('省'):  # prefix bare "省..." organs with the province name
        organ = '广东' + organ

    # Fulltext container, with one alternate layout; extract once per xpath
    # (the original re-queried the primary xpath even after a hit).
    fulltext_xpath = '//div[@class="article-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99356'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTGD"
    zt_provider = "cztgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when there is none) back to the source
    # row's other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省人力资源和社会保障厅 (Guangdong Provincial Department of Human Resources and Social Security, hrss.gd.gov.cn)
def policy_hrssgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback (JSON API) for the HR & Social Security site: schedules
    the remaining list pages on page 1 and queues one article task per entry."""
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page: enqueue all remaining list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for entry in all_data["articles"]:
        url = entry["url"]
        title = entry["title"]
        if url is None or 'htm' not in url:
            continue
        parts = url.split('/')
        leaf = parts[-1]
        for ext in (".shtml", ".html", ".htm"):
            leaf = leaf.replace(ext, "")
        article_json = {
            "url": url.replace("../", "/").replace("./", "/").strip(),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        di_model_next.lists.append({
            "task_name": info_dicts["task_name"],
            "task_tag": info_dicts["task_tag_next"],
            "rawid": f"{parts[-2]}_{leaf}",
            "sub_db_id": '99357',
            "article_json": json.dumps(article_json, ensure_ascii=False),
        })
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for hrss.gd.gov.cn (广东省人力资源和社会保障厅) HTML lists.

    On the first page, reads the pager's "last" link to derive the total page
    count and schedules tasks for the remaining pages (befor_dicts). Then it
    extracts every article link/title/date on the current page into
    next-stage article tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The pager's "last" link looks like ".../index_<N>.html" where N is the page count.
        page_info_before = res.xpath("//div[@class='pages']//a[@class='last']/@href").extract_first()
        if page_info_before:
            page_info = re.sub(r".*index_(\d+)\.html", r"\1", page_info_before)
        else:
            # Single-page lists have no pager; re.sub must not receive None.
            page_info = ""
        # If the href did not match, re.sub returns it unchanged; only trust pure digits.
        total_page = int(page_info) if page_info.isdigit() else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            # Alternate list layout used by some columns of the site.
            li_list = res.xpath('//div[contains(@class, "channel-title")]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Direct hrefs that are not html pages (e.g. attachments) are skipped.
                continue

            # rawid = "<parent dir>_<file stem>", e.g. .../content/post_1.html -> content_post_1
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99357'
            if url.startswith("/"):
                url_before = "http://hrss.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            # Some items carry no title text at all; store "" instead of crashing.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath('./span[@class="pubDate"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./div[@class='lists_time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for hrss.gd.gov.cn: no follow-up tasks are
    generated at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_hrssgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for hrss.gd.gov.cn article pages.

    Extracts metadata (title, document number, index number, keywords,
    subject, written date, issuing organ) and the full text from the article
    HTML, builds `policy_latest` / `policy_fulltext_latest` rows, and writes
    attachment info back onto the source row via `other_dicts`.

    Raises:
        Exception: when no full-text container can be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: main heading first, then any <h1>, finally the list-stage title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each label cell ("发文字号", "索引号", ...) is followed by its value cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()

    # Organ names like "省人社厅" lack the province prefix.
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: try the main container first, then the alternate article layout.
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article_con"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99357'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSGD"
    zt_provider = "hrssgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment links found inside the full-text container on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省农业农村厅
def policy_daragdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for dara.gd.gov.cn (广东省农业农村厅), JSON list API.

    The response body is JSON with "total" (article count; the API pages by
    100) and "articles" (each with url/title/created_at). Page 1 schedules
    the remaining pages; every article becomes a next-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["total"]
        total_page = math.ceil(int(total) / 100)  # the API serves 100 articles per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["articles"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if url is None:
                continue
            elif 'htm' not in url:
                # Non-HTML entries (files, external links) are skipped.
                continue

            # rawid = "<parent dir>_<file stem>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99358'

            # Strip whitespace, consistent with the sibling HTML list callbacks.
            article_json["url"] = url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip() if title else ""

            # created_at may be missing or null in the API payload.
            created_at = li.get("created_at") or ""
            article_json["pub_date"] = clean_pubdate(
                created_at.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_daragdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for dara.gd.gov.cn (广东省农业农村厅) HTML lists.

    On the first page, reads the pager's "last" link to derive the total page
    count and schedules tasks for the remaining pages (befor_dicts). Then it
    extracts every article link/title/date on the current page into
    next-stage article tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The pager's "last" link looks like ".../index_<N>.html" where N is the page count.
        page_info_before = res.xpath("//div[@class='page']//a[@class='last']/@href").extract_first()
        if page_info_before:
            page_info = re.sub(r".*index_(\d+)\.html", r"\1", page_info_before)
        else:
            # Single-page lists have no pager; re.sub must not receive None.
            page_info = ""
        # If the href did not match, re.sub returns it unchanged; only trust pure digits.
        total_page = int(page_info) if page_info.isdigit() else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            # Alternate list layout used by some columns of the site.
            li_list = res.xpath('//div[@id="zxpt"]/div')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Direct hrefs that are not html pages (e.g. attachments) are skipped.
                continue

            # rawid = "<parent dir>_<file stem>", e.g. .../content/post_1.html -> content_post_1
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99358'
            if url.startswith("/"):
                url_before = "http://dara.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            # Some items carry no title text at all; store "" instead of crashing.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath('./p/span[1]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./div[@class='lists_time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_daragdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for dara.gd.gov.cn: no follow-up tasks are
    generated at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_daragdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for dara.gd.gov.cn article pages.

    Extracts metadata (title, document number, index number, keywords,
    subject, written date, issuing organ) and the full text from the article
    HTML, builds `policy_latest` / `policy_fulltext_latest` rows, and writes
    attachment info back onto the source row via `other_dicts`.

    Raises:
        Exception: when no full-text container can be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: main heading first, then any <h2>, finally the list-stage title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each label cell ("发文字号", "索引号", ...) is followed by its value cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()

    # Organ names like "省农业农村厅" lack the province prefix.
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: try the main container first, then the alternate article layout.
    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99358'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DARAGD"
    zt_provider = "daragdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Record attachment links found inside the full-text container on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省住房和城乡建设厅
def policy_zfcxjstgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for zfcxjst.gd.gov.cn (广东省住房和城乡建设厅), JSON list API.

    The response body is JSON with "total" (article count; the API pages by
    100) and "articles" (each with url/title/created_at). Page 1 schedules
    the remaining pages; every article becomes a next-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["total"]
        total_page = math.ceil(int(total) / 100)  # the API serves 100 articles per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = all_data["articles"]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            if url is None:
                continue
            elif 'htm' not in url:
                # Non-HTML entries (files, external links) are skipped.
                continue

            # rawid = "<parent dir>_<file stem>"
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99359'

            # Strip whitespace, consistent with the sibling HTML list callbacks.
            article_json["url"] = url.replace("../", "/").replace("./", "/").strip()
            article_json["title"] = title.strip() if title else ""

            # created_at may be missing or null in the API payload.
            created_at = li.get("created_at") or ""
            article_json["pub_date"] = clean_pubdate(
                created_at.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjstgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for zfcxjst.gd.gov.cn (广东省住房和城乡建设厅) HTML lists.

    This site's pager could not be read reliably from the first page, so the
    total page count is pinned to a fixed value. Page 1 schedules the
    remaining pages; every list item becomes a next-stage article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Hard-coded page count: the pager markup on this site is not parseable.
        # TODO(review): revisit if the list grows beyond 55 pages.
        total_page = 55
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news_group_list"]/li')
        if not li_list:
            # Alternate list layout used by some columns of the site.
            li_list = res.xpath('//div[@id="zxpt"]/div')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath(".//a/h5/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                # Direct hrefs that are not html pages (e.g. attachments) are skipped.
                continue

            # rawid = "<parent dir>_<file stem>", e.g. .../content/post_1.html -> content_post_1
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99359'
            if url.startswith("/"):
                url_before = "http://zfcxjst.gd.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/").strip()
            # Some items carry no title text at all; store "" instead of crashing.
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath('./a//h6/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./div[@class='lists_time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjstgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zfcxjst.gd.gov.cn: no follow-up tasks are
    generated at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_zfcxjstgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Guangdong Provincial Department of
    Housing and Urban-Rural Development (广东省住房和城乡建设厅, sub_db_id 99359).

    Parses the downloaded article HTML, extracts title/metadata/full text, and
    queues rows for the `policy_latest` and `policy_fulltext_latest` tables,
    plus an update of the source row's `other_dicts` with attachment info.

    Raises:
        Exception: when no known full-text container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]  # pub_date was normalized at list time; may be ""

    res = Selector(text=html)

    # Title: prefer the article heading, then the alternate layout heading,
    # and finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="news-title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta_td(*chars):
        # Value cell following a <td> label that contains all of *chars*
        # (labels are matched character-by-character because the sites pad
        # them with variable whitespace, e.g. "文  号").
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    def _meta_li(*chars):
        # Fallback layout: label and value share one <li> node.
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(f'//li[{cond}]/text()').extract()).strip()

    pub_no = _meta_td("文", "号") or _meta_li("文", "号")
    index_no = _meta_td("索", "引") or _meta_li("索", "引")
    keyword = _meta_td("主", "词") or _meta_li("主", "词")
    subject = _meta_td("分", "类") or _meta_li("分", "类")
    written_date = _meta_td("成", "文", "日") or _meta_li("成", "文", "日")
    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether policy_latest is supposed to carry it.
    legal_status = _meta_td("时", "效") or _meta_li("时", "效")
    organ = _meta_td("发", "机", "构") or _meta_li("发", "机", "构")

    # Normalize bare provincial organ names, e.g. "省住建厅" -> "广东省住建厅".
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: try the known containers in order. The matching xpath is kept
    # because get_file_info() scopes its attachment lookup to that container.
    # (The old code re-extracted the first xpath even after it had matched.)
    fulltext = None
    for fulltext_xpath in ('//div[@class="news-article"]', '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99359'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZFCXJSTGD"
    zt_provider = "zfcxjstgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, matches the module's other ETL callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (scoped to the matched full-text container) back
    # onto the source row via other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省卫生健康委员会
def policy_wsjkwgdlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Guangdong Provincial Health Commission
    (sub_db_id 99360) JSON list API.

    On page 1 it fans out follow-up list tasks for every remaining page; for
    every article entry it queues a next-stage (article) task carrying the
    url/title/pub_date as serialized article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article row; task_tag_next is
    # swapped into task_tag before insert (see the loop below).
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The "html" payload is actually a JSON document for this source.
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["total"]
        total_page = math.ceil(int(total) / 100)  # the API serves 100 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages (2..total_page).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy() is required: sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = all_data["articles"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            # Skip entries without a usable static-page url.
            if url is None:

                continue
            elif 'htm' not in url:
                continue

            # url_before = "http://www.sanya.gov.cn"
            # rawid = "<parent-dir>_<file-stem>" keeps ids unique across folders.
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99360'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            # created_at may carry a "发布时间：" prefix and bracket decoration.
            article_json["pub_date"] = clean_pubdate(
                li["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwgdlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Guangdong Provincial Health Commission
    (sub_db_id 99360) HTML list pages.

    Parses one list page, fans out the remaining list pages when handling
    page 1, and queues one article-stage task per list entry.

    Fix vs. previous revision: the "last page" link can be absent, in which
    case extract_first() returns None and the old code crashed inside re.sub
    (TypeError); a non-numeric match would also have crashed int(). Both now
    fall back to a single page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every queued article row; task_tag_next is swapped
    # into task_tag before insert (see the loop below).
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the "last page" link, e.g. .../index_12.html
        last_href = res.xpath("//div[@class='page']//a[@class='last']/@href").extract_first()
        if last_href:
            m = re.search(r"index_(\d+)\.html", last_href)
            total_page = int(m.group(1)) if m else 1
        else:
            total_page = 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages (2..total_page).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                # copy() is required: sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="section list"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@id="zxpt"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # NOTE(review): as in the original, the 'htm' filter applies only
            # to the primary href, not to the fallback one — confirm intent.
            elif 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<file-stem>" keeps ids unique across folders.
            parts = url.split('/')
            rawid = "{}_{}".format(
                parts[-2],
                parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99360'
            url_before = "http://wsjkw.gd.gov.cn" if url.startswith("/") else ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
            }
            pub_date_before = li.xpath('./span/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./div[@class='lists_time']/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwgdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Guangdong Provincial Health Commission
    source: no per-article processing happens here, so an empty DealModel is
    returned (extraction is done in policy_wsjkwgdarticle_etl_callback)."""
    return DealModel()


def policy_wsjkwgdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Guangdong Provincial Health
    Commission (广东省卫生健康委员会, sub_db_id 99360).

    Parses the downloaded article HTML, extracts title/metadata/full text, and
    queues rows for the `policy_latest` and `policy_fulltext_latest` tables,
    plus an update of the source row's `other_dicts` with attachment info.

    Raises:
        Exception: when no known full-text container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]  # pub_date was normalized at list time; may be ""

    res = Selector(text=html)

    # Title: prefer the article heading, then the alternate layout heading,
    # and finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="text_cencer font24 margin_top20 c282828"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta_td(*chars):
        # Value cell following a <td> label that contains all of *chars*
        # (labels are matched character-by-character because the sites pad
        # them with variable whitespace, e.g. "文  号").
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    pub_no = _meta_td("文", "号")
    index_no = _meta_td("索", "引")
    keyword = _meta_td("主", "词")
    subject = _meta_td("分", "类")
    written_date = _meta_td("成", "文", "日")
    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether policy_latest is supposed to carry it.
    legal_status = _meta_td("时", "效")
    organ = _meta_td("发", "机", "构")

    # Normalize bare provincial organ names, e.g. "省卫健委" -> "广东省卫健委".
    if organ.startswith('省'):
        organ = '广东' + organ

    # Full text: try the known containers in order. The matching xpath is kept
    # because get_file_info() scopes its attachment lookup to that container.
    # (The old code re-extracted the first xpath even after it had matched.)
    fulltext = None
    for fulltext_xpath in ('//div[@class="content"]', '//div[@class="content-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99360'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWGD"
    zt_provider = "wsjkwgdgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, matches the module's other ETL callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (scoped to the matched full-text container) back
    # onto the source row via other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省广州市
def policy_gzlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Guangzhou municipal government
    (sub_db_id 99361) JSON list API.

    On page 1 it fans out follow-up list tasks for every remaining page; for
    every article entry it queues a next-stage (article) task carrying the
    url/title/pub_date as serialized article_json.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every queued article row; task_tag_next is
    # swapped into task_tag before insert (see the loop below).
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The "html" payload is actually a JSON document for this source.
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total = all_data["total"]
        total_page = math.ceil(int(total) / 100)  # the API serves 100 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages (2..total_page).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy() is required: sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = all_data["articles"]

        # list_json = json.loads(callmodel.sql_model.list_json)
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li["url"]
            title = li["title"]
            # Skip entries without a usable static-page url.
            if url is None:

                continue
            elif 'htm' not in url:
                continue

            # url_before = "http://www.sanya.gov.cn"
            # rawid = "<parent-dir>_<file-stem>" keeps ids unique across folders.
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            # rawid = rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99361'

            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()

            # created_at may carry a "发布时间：" prefix and bracket decoration.
            article_json["pub_date"] = clean_pubdate(
                li["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gzlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Guangzhou municipal (sub_db_id 99361) HTML
    list pages.

    Parses one list page, fans out the remaining list pages when handling
    page 1, and queues one article-stage task per list entry.

    Fixes vs. previous revision:
      * pagination: the "last page" link can be absent, in which case
        extract_first() returns None and the old code crashed inside re.sub
        (TypeError); a non-numeric match would also have crashed int().
      * pub_date: the first selector used span[class="time"] — an XPath
        element test (a child element named "class"), not an attribute test —
        so it could never match; corrected to span[@class="time"].
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every queued article row; task_tag_next is swapped
    # into task_tag before insert (see the loop below).
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total page count comes from the "last page" link, e.g. .../index_12.html
        last_href = res.xpath("//ul[@class='clearfix']//a[@class='last up']/@href").extract_first()
        if last_href:
            m = re.search(r"index_(\d+)\.html", last_href)
            total_page = int(m.group(1)) if m else 1
        else:
            total_page = 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages (2..total_page).
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps(
                    {"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                # copy() is required: sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="news_list"]/li')
        if not li_list:
            li_list = res.xpath('//div[@id="zxpt"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # NOTE(review): as in the original, the 'htm' filter applies only
            # to the primary href, not to the fallback one — confirm intent.
            elif 'htm' not in url:
                continue

            # rawid = "<parent-dir>_<file-stem>" keeps ids unique across folders.
            parts = url.split('/')
            rawid = "{}_{}".format(
                parts[-2],
                parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99361'
            url_before = "http://gz.gd.gov.cn" if url.startswith("/") else ""
            article_json = {
                "url": url_before + url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
            }
            # Attribute test (@class) — the old element test never matched.
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Guangzhou municipal source: no
    per-article processing happens here, so an empty DealModel is returned
    (extraction is done in policy_gzarticle_etl_callback)."""
    return DealModel()


def policy_gzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Guangzhou municipal government
    (广东省广州市, sub_db_id 99361).

    Parses the downloaded article HTML, extracts title/metadata/full text, and
    queues rows for the `policy_latest` and `policy_fulltext_latest` tables,
    plus an update of the source row's `other_dicts` with attachment info.

    Raises:
        Exception: when no known full-text container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]  # pub_date was normalized at list time; may be ""

    res = Selector(text=html)

    # Title: prefer the article heading, then the alternate layout heading,
    # and finally the title captured at list-crawl time.
    title = ''.join(res.xpath('//h1[@class="content_title content_title_h1"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="text_cencer font24 margin_top20 c282828"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta_td(*chars):
        # Value cell following a <td> label that contains all of *chars*
        # (labels are matched character-by-character because the sites pad
        # them with variable whitespace, e.g. "文  号").
        cond = ' and '.join(f'contains(string(),"{c}")' for c in chars)
        return ''.join(res.xpath(
            f'//td[{cond}]/following-sibling::td[1]//text()').extract()).strip()

    pub_no = _meta_td("文", "号")
    index_no = _meta_td("索", "引")
    keyword = _meta_td("主", "词")
    subject = _meta_td("分", "类")
    written_date = _meta_td("成", "文", "日")
    # NOTE(review): legal_status is extracted but never written into `data`;
    # confirm whether policy_latest is supposed to carry it.
    legal_status = _meta_td("时", "效")
    organ = _meta_td("发", "机", "构")

    # Normalize bare municipal organ names, e.g. "市政府办" -> "广州市政府办".
    if organ.startswith('市'):
        organ = '广州' + organ

    # Full text: try the known containers in order. The matching xpath is kept
    # because get_file_info() scopes its attachment lookup to that container.
    # (The old code re-extracted an already-matched xpath a second time.)
    fulltext = None
    for fulltext_xpath in ('//div[@class="content"]', '//div[@id="zoomcon"]',
                           '//div[@class="content-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99361'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GZ"
    zt_provider = "gzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, matches the module's other ETL callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (scoped to the matched full-text container) back
    # onto the source row via other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  广东省深圳市
def policy_szlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shenzhen municipal government
    (sub_db_id 99362) HTML list pages.

    On page 1 it reads the total item count embedded as "content_str= N" in
    the page, fans out the remaining list pages (capped at 20), and queues one
    article-stage task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields for every queued article row; task_tag_next is swapped
    # into task_tag before insert (see the loop below).
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Item total is embedded in inline JS as "content_str= N".
            max_count = re.findall("content_str= (\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = math.ceil(max_count / 10)  # 10 entries per list page
            total_page = total_page if total_page < 20 else 20  # hard cap at 20 pages
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                # copy() is required: sql_dict is mutated on every iteration.
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zx_ml_list"]//ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[2]/a/@href').extract_first()
            if not href:
                continue
            # Relative hrefs are resolved against the list page's own url.
            base_url = f'http://www.sz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file stem of the final path segment.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99362'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None when @title or the
            # date span is missing, which would raise AttributeError on
            # .strip() — confirm whether those entries can occur.
            article_json["title"] = li.xpath('span[2]/a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_szlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shenzhen (sz.gov.cn) policy documents.

    Parses the JSONP list response, schedules the remaining list pages when
    processing page 1, and emits one follow-up article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The response is JSONP: strip the "callback(...)" wrapper first.
        html_json = json.loads(re.findall(r'\((.*)\)', para_dicts["data"]["1_1"]['html'])[0])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            allrow = html_json['count']
            total_page = math.ceil(allrow / 10)  # API returns 10 entries per page
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): this loop starts at 1 (re-inserting the current
            # page) unlike sibling callbacks that start at page_index + 1;
            # harmless under insert-ignore, but confirm it is intentional.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['results']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            base_url = 'http://www.sz.gov.cn/zwgk/zfxxgk/zfwj/szfh/'
            url = parse.urljoin(base_url, li['url'])
            if 'sz' not in url:
                continue
            # rawid is the last path segment without its extension; skip
            # entries whose file name carries no extension at all (the
            # original raised IndexError here).
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99362'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pub_time']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_szarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shenzhen; all parsing happens in the ETL step."""
    return DealModel()


def policy_szarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shenzhen policy article pages.

    Extracts metadata and the full-text HTML, builds the policy_latest /
    policy_fulltext_latest rows, and writes the attachment list back onto
    the source row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Metadata cells; the label text contains fixed-width padding spaces.
    pub_no = ''.join(res.xpath('//div[@class="xx_con"]//em[contains(text(),"文       号")]/parent::p[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xx_con"]//em[contains(text(),"索")]/parent::p[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xx_con"]//em[contains(text(),"分       类")]/parent::p[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="xx_con"]//em[contains(text(),"主 题 词：")]/parent::p[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xx_con"]//em[contains(text(),"发布机构")]/parent::p[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '深圳' + organ

    fulltext_xpath = '//div[@class="news_cont_d_wrap"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"sz etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99362'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SZ"
    zt_provider = "szgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can sit in the body and in a dedicated download list.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, '(//div[@class="fjdown"])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省珠海市 (Zhuhai, Guangdong Province)

def policy_zhuhailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zhuhai (zhuhai.gov.cn) policy documents.

    Reads the total page count from the pager's "last" link, schedules the
    remaining list pages on page 1, and emits one article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//div[@class='page mt20']//a[@class='last']/@href").extract_first()
        # Pull <n> out of ".../index_<n>.html"; default to a single page when
        # the pager link is absent (the original re.sub crashed on None) or
        # does not contain a page number (the original int() raised).
        page_match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(page_match.group(1)) if page_match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            li_list = res.xpath('//div[@id="zxpt"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = parent directory + file name without its extension.
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99363'
            url_before = "http://www.zhuhai.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against a row with an href but no title text at all.
            article_json["title"] = (title or "").strip()
            # BUG FIX: was './span[class="time"]' — an element test for a child
            # named <class>, which never matches; '@class' selects the
            # attribute so the date span is actually found.
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhuhaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhuhai; all parsing happens in the ETL step."""
    return DealModel()


def policy_zhuhaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhuhai policy article pages.

    Extracts metadata (title, document number, index number, keyword, subject,
    issuing organ, written date) and the full text from the downloaded HTML,
    builds the policy_latest / policy_fulltext_latest rows, and writes the
    attachment list back onto the source row.

    Raises:
        Exception: when none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page headers first, fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="content_title content_title_h1"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="main"]/h4/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '珠海' + organ

    # Try the known full-text containers in order; the original re-ran the
    # last xpath unconditionally after the fallback chain.
    fulltext = None
    fulltext_xpath = '//div[@class="content-content"]'
    for candidate in ('//div[@class="new_zh"]', '//div[@id="new_zh"]',
                      '//div[@class="content-content"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"zhuhai etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99363'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    # NOTE(review): "ZHUAHI" looks like a typo for "ZHUHAI", but the value may
    # already be persisted downstream — confirm before changing it.
    product = "ZHUAHI"
    zt_provider = "zhuhaigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省汕头市 (Shantou, Guangdong Province)
def policy_shantoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSON list-API callback for Shantou policy documents.

    On page 1, re-enqueues the remaining list pages; every article entry in
    the response becomes a follow-up crawl task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API serves 100 entries per page.
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        bef_insert = DealInsertModel()
        bef_insert.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page_no
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            bef_insert.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(bef_insert)

    next_insert = DealInsertModel()
    next_insert.insert_pre = CoreSqlValue.insert_ig_it
    for article in all_data["articles"]:
        url = article["url"]
        title = article["title"]
        if url is None or 'htm' not in url:
            continue
        task_row = base_info.copy()
        task_row["task_tag"] = task_row.pop("task_tag_next")
        segments = url.split('/')
        # rawid = parent directory + file name without its extension.
        stem = segments[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task_row["rawid"] = "{}_{}".format(segments[-2], stem)
        task_row["sub_db_id"] = '99364'
        detail = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                article["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task_row["article_json"] = json.dumps(detail, ensure_ascii=False)
        next_insert.lists.append(task_row)
    result.next_dicts.insert.append(next_insert)

    return result


def policy_shantoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for Shantou policy documents.

    Reads the total page count from the pager's last-page link, schedules the
    remaining list pages on page 1, and emits one article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath(
            "//div[@id='page_div']//div[@class='pagination_index_num last']/a/@href").extract_first()
        # Extract <n> from ".../index_<n>.html"; default to one page when the
        # link is absent (the original re.sub crashed on None) or has no
        # page number (the original int() raised).
        page_match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(page_match.group(1)) if page_match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class=" wzlm_right "]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@class="list_right"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue

            rawid_list = url.split('/')
            # rawid = parent directory + file name without its extension.
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99364'
            url_before = "https://www.shantou.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # Guard against a row with an href but no title text at all.
            article_json["title"] = (title or "").strip()
            # BUG FIX: was './span[class="time"]' — an element test for a child
            # named <class>, which never matches; '@class' selects the
            # attribute so the date span is actually found.
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shantouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shantou; all parsing happens in the ETL step."""
    return DealModel()


def policy_shantouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shantou policy article pages.

    Extracts metadata (title, document number, index number, keyword, subject,
    issuing organ, written date) and the full text from the downloaded HTML,
    builds the policy_latest / policy_fulltext_latest rows, and writes the
    attachment list back onto the source row.

    Raises:
        Exception: when none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page headers first, fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="text_cencer font24 margin_top20 c282828"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '汕头' + organ

    # Try the known full-text containers in order; the original re-ran the
    # last xpath unconditionally after the fallback chain.
    fulltext = None
    fulltext_xpath = '//div[@class="article-content"]'
    for candidate in ('//div[@class="content"]', '//div[@id="zoomcon"]',
                      '//div[@class="article-content"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    if not fulltext:
        # Was a bare `raise Exception`; include context for debugging.
        raise Exception(f"shantou etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99364'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHANTOU"
    zt_provider = "shantougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省佛山市 (Foshan, Guangdong Province)
def policy_foshanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """JSON list-API callback for Foshan policy documents.

    On page 1, re-enqueues the remaining list pages; every article entry in
    the response becomes a follow-up crawl task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API serves 100 entries per page.
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        bef_insert = DealInsertModel()
        bef_insert.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page_no
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            bef_insert.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(bef_insert)

    next_insert = DealInsertModel()
    next_insert.insert_pre = CoreSqlValue.insert_ig_it
    for article in all_data["articles"]:
        url = article["url"]
        title = article["title"]
        if url is None or 'htm' not in url:
            continue
        task_row = base_info.copy()
        task_row["task_tag"] = task_row.pop("task_tag_next")
        segments = url.split('/')
        # rawid = parent directory + file name without its extension.
        stem = segments[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task_row["rawid"] = "{}_{}".format(segments[-2], stem)
        task_row["sub_db_id"] = '99365'
        detail = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                article["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task_row["article_json"] = json.dumps(detail, ensure_ascii=False)
        next_insert.lists.append(task_row)
    result.next_dicts.insert.append(next_insert)

    return result


def policy_foshanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Foshan (佛山) policy crawling.

    On the first page, reads the total item count from the inline
    ``Math.ceil(<count>`` JS snippet, derives the page count (capped at 20
    list pages) and schedules the remaining pages; on every page, pushes
    each article link (rawid / url / title / pub_date) to the next stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        res = Selector(text=html)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # BUGFIX: escape the dot — the old pattern "Math.ceil\(" let `.`
            # match any character instead of a literal dot.
            count_match = re.findall(r"Math\.ceil\((\d+)", html)
            if count_match:
                total_page = math.ceil(int(count_match[0]) / 20)
                # Only crawl at most 20 list pages.
                total_page = min(total_page, 20)
            else:
                total_page = 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list inlist3"]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="listu"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99365'
            url_before = "http://www.foshan.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            # BUGFIX: predicate was `span[class="time"]` (missing @), which
            # tests a child *element* named "class" and never matches the
            # class attribute; the plain ./span fallback is kept.
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_foshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Foshan: returns a fresh, empty DealModel."""
    return DealModel()


def policy_foshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Foshan (佛山) policy article page.

    Extracts title, document metadata and fulltext from the article html,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back onto the source row via ``other_dicts``.

    Raises:
        Exception: if no known fulltext container is present in the html.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3[@class="conetent-title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    def _classify(predicate):
        # Read the metadata cell following the <td> label matched by
        # `predicate` inside the document-classification table.
        return ''.join(res.xpath(
            '//div[@class="classify"]//td[' + predicate +
            ']/following-sibling::td[1]//text()').extract()).strip()

    pub_no = _classify('contains(string(),"文") and contains(string(),"号")')
    index_no = _classify('contains(string(),"索") and contains(string(),"引")')
    keyword = _classify('contains(string(),"主") and contains(string(),"词")')
    subject = _classify('contains(string(),"分") and contains(string(),"类")')
    written_date = _classify('contains(string(),"成") and contains(string(),"文") and contains(string(),"日")')
    # Extracted but not currently persisted into `data` below.
    legal_status = _classify('contains(string(),"时") and contains(string(),"效")')
    organ = _classify('contains(string(),"发") and contains(string(),"机") and contains(string(),"构")')

    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city.
        organ = '佛山' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # so attachment extraction below scans the same container. (Replaces the
    # old redundant unconditional re-extraction after the fallback chain.)
    fulltext = None
    fulltext_xpath = ''
    for candidate in ('//div[@class="m-con"]', '//div[@id="zoomcon"]',
                      '//div[@class="article-content"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    if not fulltext:
        # BUGFIX: was a bare `raise Exception` with no context.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99365'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FOSHAN"
    zt_provider = "foshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省韶关市
def policy_sglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shaoguan (韶关) policy crawling.

    Derives the total page count from the pager's "last page" link,
    schedules the remaining list pages when on page 1, and pushes every
    article link (rawid / url / title / pub_date) to the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: the "last" pager link can be absent (extract_first() ->
        # None, old re.sub raised TypeError), and a non-matching href made
        # re.sub return the whole href so int() raised ValueError. A guarded
        # re.search handles both.
        page_info_before = res.xpath("//div[@class='pages']//a[@class='last']/@href").extract_first() or ""
        match = re.search(r"index_(\d+)\.html", page_info_before)
        total_page = int(match.group(1)) if match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="pageList"]/ul/li')
        if not li_list:
            li_list = res.xpath('//div[@id="zxpt"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99366'
            url_before = "https://www.sg.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            # BUGFIX: predicate was `span[class="time"]` (missing @) and never
            # matched the class attribute; the plain ./span fallback is kept.
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shaoguan: returns a fresh, empty DealModel."""
    return DealModel()


def policy_sgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Shaoguan (韶关) policy article page.

    Extracts title, document metadata and fulltext from the article html,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back onto the source row via ``other_dicts``.

    Raises:
        Exception: if no known fulltext container is present in the html.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="content_title content_title_h1"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    def _meta(predicate):
        # Read the metadata cell following the <td> label matched by `predicate`.
        return ''.join(res.xpath(
            '//td[' + predicate + ']/following-sibling::td[1]//text()').extract()).strip()

    pub_no = _meta('contains(string(),"文") and contains(string(),"号")')
    index_no = _meta('contains(string(),"索") and contains(string(),"引")')
    keyword = _meta('contains(string(),"主") and contains(string(),"词")')
    subject = _meta('contains(string(),"分") and contains(string(),"类")')
    written_date = _meta('contains(string(),"成") and contains(string(),"文") and contains(string(),"日")')
    # Extracted but not currently persisted into `data` below.
    legal_status = _meta('contains(string(),"时") and contains(string(),"效")')
    organ = _meta('contains(string(),"发") and contains(string(),"机") and contains(string(),"构")')

    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city.
        organ = '韶关' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # so attachment extraction below scans the same container. (Replaces the
    # old redundant unconditional re-extraction after the fallback chain.)
    fulltext = None
    fulltext_xpath = ''
    for candidate in ('//div[@class="content"]', '//div[@id="zoomcon"]',
                      '//div[@class="content-content"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    if not fulltext:
        # BUGFIX: was a bare `raise Exception` with no context.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99366'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SG"
    zt_provider = "sggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省河源市
def policy_heyuanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Heyuan (河源) policy crawling.

    Derives the total page count from the pager's "last page" link,
    schedules the remaining list pages when on page 1, and pushes every
    article link (rawid / url / title / pub_date) to the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: the "last" pager link can be absent (extract_first() ->
        # None, old re.sub raised TypeError), and a non-matching href made
        # re.sub return the whole href so int() raised ValueError. A guarded
        # re.search handles both.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first() or ""
        match = re.search(r"index_(\d+)\.html", page_info_before)
        total_page = int(match.group(1)) if match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list b-mar-b-10"]/li')
        if not li_list:
            li_list = res.xpath('//div[@id="zxpt"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99367'
            url_before = "http://www.heyuan.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            # BUGFIX: predicate was `span[class="list-date"]` (missing @) and
            # never matched the class attribute; ./span fallback is kept.
            pub_date_before = li.xpath('./span[@class="list-date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_heyuanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Heyuan: returns a fresh, empty DealModel."""
    return DealModel()


def policy_heyuanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Heyuan (河源) policy article page.

    Extracts title, document metadata and fulltext from the article html,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    writes attachment info back onto the source row via ``other_dicts``.

    Raises:
        Exception: if no known fulltext container is present in the html.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="content_title content_title_h1"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="content-title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    def _meta(predicate):
        # Read the metadata cell following the <td> label matched by `predicate`.
        return ''.join(res.xpath(
            '//td[' + predicate + ']/following-sibling::td[1]//text()').extract()).strip()

    pub_no = _meta('contains(string(),"文") and contains(string(),"号")')
    index_no = _meta('contains(string(),"索") and contains(string(),"引")')
    keyword = _meta('contains(string(),"主") and contains(string(),"词")')
    subject = _meta('contains(string(),"分") and contains(string(),"类")')
    written_date = _meta('contains(string(),"成") and contains(string(),"文") and contains(string(),"日")')
    # Extracted but not currently persisted into `data` below.
    legal_status = _meta('contains(string(),"时") and contains(string(),"效")')
    organ = _meta('contains(string(),"发") and contains(string(),"机") and contains(string(),"构")')

    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city.
        organ = '河源' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # so attachment extraction below scans the same container. (Replaces the
    # old redundant unconditional re-extraction after the fallback chain.)
    fulltext = None
    fulltext_xpath = ''
    for candidate in ('//div[@class="content"]', '//div[@id="zoomcon"]',
                      '//div[@class="content-text"]'):
        fulltext = res.xpath(candidate).extract_first()
        if fulltext:
            fulltext_xpath = candidate
            break
    if not fulltext:
        # BUGFIX: was a bare `raise Exception` with no context.
        raise Exception(f"fulltext container not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99367'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEYUAN"
    zt_provider = "heyuangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省梅州市
def policy_meizhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-API callback for Meizhou (梅州): the response is a JSONP wrapper
    whose JSON body carries a ``count`` total and a ``results`` article list.
    Page 1 schedules the remaining list pages; every article is forwarded to
    the next task stage with rawid / url / title / pub_date filled in.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip the JSONP wrapper, then parse the JSON payload.
        payload = re.sub(r"jQuery.*?\((.*)\)", r"\1", para_dicts["data"]["1_1"]['html'])
        body = json.loads(payload)
        total_page = math.ceil(int(body["count"]) / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue every remaining list page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page_no in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page_no
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in body["results"]:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = item["post_url"]
            title = item["title"]
            # Skip entries without a usable html url.
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            leaf = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp["rawid"] = "{}_{}".format(parts[-2], leaf)
            temp["sub_db_id"] = '99368'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": title.strip(),
                "pub_date": clean_pubdate(
                    item["pub_time"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_meizhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Meizhou (梅州) static list pages.

    Derives the total page count from the pager's "last page" link,
    schedules the remaining list pages when on page 1, and pushes every
    article link (rawid / url / title / pub_date) to the next task stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # BUGFIX: the "last" pager link can be absent (extract_first() ->
        # None, old re.sub raised TypeError), and a non-matching href made
        # re.sub return the whole href so int() raised ValueError. A guarded
        # re.search handles both.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first() or ""
        match = re.search(r"index_(\d+)\.html", page_info_before)
        total_page = int(match.group(1)) if match else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@class="list"]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="list cl"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath('./a/@href').get()
            title = li.xpath('./a/@title').get()
            if title is None:
                title = li.xpath(".//a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99368'
            url_before = "https://www.meizhou.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            # BUGFIX: predicate was `span[class="date"]` (missing @) and never
            # matched the class attribute; the plain ./span fallback is kept.
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '')
                .replace('(', '').replace(')', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_meizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Meizhou policy pages: no extra scheduling, just an empty DealModel."""
    return DealModel()


def policy_meizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Meizhou (梅州) policy article pages.

    Parses the fetched article HTML, extracts metadata (title, document
    number, index number, keywords, subject, dates, issuing organ) and the
    full-text HTML fragment, then schedules inserts into ``policy_latest``
    and ``policy_fulltext_latest`` plus an ``other_dicts`` (attachments)
    update on the source row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the page HTML and ``sql_model.article_json`` the list-stage
        metadata (url / title / pub_date).
    :return: EtlDealModel carrying ``save_data`` rows and a before-table update.
    :raises Exception: when no known full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page-specific headers first, then fall back to the
    # title captured at list stage.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="show_ti"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: header cells are matched character-by-character on the
    # Chinese label (e.g. "文" + "号" -> document number) to tolerate markup noise.
    pub_no = ''.join(res.xpath(
        '//th[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//th[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//th[contains(string(),"主") and contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//th[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): extracted but never written into `data` below — confirm
    # whether legal status should be persisted.
    legal_status = ''.join(res.xpath(
        '//th[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//th[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()
    if organ.startswith('市'):
        organ = '梅州' + organ

    # Full text: try each known container in turn; keep the matching xpath
    # because it is reused below for attachment discovery. (Fix: the last
    # fallback now extracts inside its branch instead of unconditionally
    # re-running the xpath after the chain.)
    fulltext_xpath = '//div[@class="con border1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoomcon"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="show_con border1"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: previously a bare `raise Exception` with no context.
        raise Exception(f"meizhou etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99368'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MEIZHOU"
    zt_provider = "meizhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored back onto the source row as JSON.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省惠州市
def policy_huizhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Huizhou (惠州) JSONP article listings.

    Strips the ``jQuery...(...)`` JSONP wrapper, computes the page count
    (20 items per page), schedules the remaining list pages from page 1,
    and emits one next-stage row per usable article entry.

    :param callmodel: callback model with the fetched payload and SQL row.
    :return: DealModel with before-table (paging) and next-table inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Unwrap the JSONP callback: keep only the JSON payload inside the
        # parentheses. Fix: raw string avoids invalid-escape warnings.
        data = re.sub(r"jQuery.*?\((.*)\)", "\\1", para_dicts["data"]["1_1"]['html'])
        all_data = json.loads(data)
        total = all_data["count"]
        total_page = math.ceil(int(total) / 20)  # 20 articles per list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in all_data["results"]:
            url = li["post_url"]
            title = li["title"]
            # Skip entries without a usable article URL.
            if url is None or 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["sub_db_id"] = '99369'
            article_json = dict()
            article_json["url"] = url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            article_json["pub_date"] = clean_pubdate(
                li["pub_time"].replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huizhoulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Huizhou (惠州) HTML article listings.

    Derives the page count from the "last page" pagination link (bug fix:
    tolerate a missing link instead of crashing inside ``re.sub``),
    schedules the remaining pages from page 1, and emits one next-stage
    row per usable article entry.

    :param callmodel: callback model with the fetched HTML and SQL row.
    :return: DealModel with before-table (paging) and next-table inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        # Bug fix: extract_first() returns None when there is no pagination
        # link; re.sub(None) would raise TypeError. Default to one page.
        if page_info_before:
            page_info = re.sub(r".*index_(\d+)\.html", "\\1", page_info_before)
            total_page = int(page_info) if page_info else 1
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # `res` already holds the parsed page; no need to re-parse the HTML.
        li_list = res.xpath('//div[@class="artList"]/ul')
        if not li_list:
            li_list = res.xpath('//ul[@class="list cl"]/li')
        url_path = './li[@class="li_art_title"]/a/@href'
        title_path = './li[@class="li_art_title"]/a/@title'
        for li in li_list:
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./li[@class='li_art_title']/a/text()").get()
            if url is None:
                # Fallback: first anchor in the item; skip if still missing.
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["sub_db_id"] = '99369'
            # Root-relative links are prefixed with the municipal site host.
            url_before = "http://www.huizhou.gov.cn" if url.startswith("/") else ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./li[@class="li_art_date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '')
                .replace(']', '').replace('(', '').replace(')', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_huizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Huizhou policy pages: no extra scheduling, just an empty DealModel."""
    return DealModel()


def policy_huizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huizhou (惠州) policy article pages.

    Extracts metadata and the full-text HTML fragment from the fetched page
    and schedules inserts into ``policy_latest`` / ``policy_fulltext_latest``
    plus an ``other_dicts`` (attachments) update on the source row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the page HTML and ``sql_model.article_json`` the list-stage
        metadata (url / title / pub_date).
    :return: EtlDealModel carrying ``save_data`` rows and a before-table update.
    :raises Exception: when no known full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page-specific headers first, then the list-stage title.
    title = ''.join(res.xpath('//h1[@class="info_title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="show_ti"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata labels live in <span>s; the value is the parent <div>'s text.
    pub_no = ''.join(res.xpath(
        '//span[contains(string(),"文") and contains(string(),"号")]/ancestor::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//span[contains(string(),"索") and contains(string(),"引")]/ancestor::div[1]/text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//span[contains(string(),"主") and contains(string(),"词")]/ancestor::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//span[contains(string(),"分") and contains(string(),"类")]/ancestor::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//span[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/ancestor::div[1]/text()').extract()).strip()
    # NOTE(review): extracted but never written into `data` below — confirm
    # whether legal status should be persisted.
    legal_status = ''.join(res.xpath(
        '//span[contains(string(),"时") and contains(string(),"效")]/ancestor::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//span[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/ancestor::div[1]/text()').extract()).strip()
    if organ.startswith('市'):
        organ = '惠州' + organ

    # Full text: try each known container in turn; keep the matching xpath
    # because it is reused below for attachment discovery. (Fix: the last
    # fallback now extracts inside its branch instead of unconditionally
    # re-running the xpath after the chain.)
    fulltext_xpath = '//div[@class="row mt-5 doccontent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoomcon"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="divZoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="row px-1 px-md-3 doccontent"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: previously a bare `raise Exception` with no context.
        raise Exception(f"huizhou etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99369'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUIZHOU"
    zt_provider = "huizhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored back onto the source row as JSON.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省汕尾市
def policy_shanweilist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanwei (汕尾) JSON article listings.

    From page 1 it enqueues every remaining list page (100 items per page);
    for each article entry it builds the next-stage insert row carrying the
    rawid, sub_db_id and a JSON blob of url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    page_total = math.ceil(int(payload["total"]) / 100)
    current_page = int(callmodel.sql_model.page_index)

    if current_page == 1:
        # Only the first page schedules the remaining list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        pending = DealInsertModel()
        pending.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(current_page + 1, page_total + 1):
            row["page"] = page_total
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            pending.lists.append(row.copy())
        result.befor_dicts.insert.append(pending)

    nxt = DealInsertModel()
    nxt.insert_pre = CoreSqlValue.insert_ig_it
    for entry in payload["articles"]:
        url = entry["url"]
        if url is None or 'htm' not in url:
            continue
        row = base_row.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = "<parent-dir>_<basename-without-extension>"
        parts = url.split('/')
        row["rawid"] = "{}_{}".format(
            parts[-2], parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        row["sub_db_id"] = '99370'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": entry["title"].strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        nxt.lists.append(row)
    result.next_dicts.insert.append(nxt)
    return result


def policy_shanweilist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanwei (汕尾) HTML article listings.

    Derives the page count from the "last page" pagination link (bug fix:
    tolerate a missing link instead of crashing inside ``re.sub``),
    schedules the remaining pages from page 1, and emits one next-stage
    row per usable article entry.

    :param callmodel: callback model with the fetched HTML and SQL row.
    :return: DealModel with before-table (paging) and next-table inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//div[@class='pagination_index_num last']/a/@href").extract_first()
        # Bug fix: extract_first() returns None when there is no pagination
        # link; re.sub(None) would raise TypeError. Default to one page.
        if page_info_before:
            page_info = re.sub(r".*index_(\d+)\.html", "\\1", page_info_before)
            total_page = int(page_info) if page_info else 1
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # `res` already holds the parsed page; no need to re-parse the HTML.
        li_list = res.xpath('//div[@class="con-right fr"]/div[@class="list_div mar-top2 "]')
        if not li_list:
            li_list = res.xpath('//ul[@class="listu"]/li')
        url_path = './div[@class="list-right_title fon_1"]/a/@href'
        title_path = './div[@class="list-right_title fon_1"]/a/@title'
        for li in li_list:
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./div[@class='list-right_title fon_1']/a/text()").get()
            if url is None:
                # Fallback: first anchor in the item; skip if still missing.
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            elif 'htm' not in url:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid = "<parent-dir>_<basename-without-extension>"
            rawid_list = url.split('/')
            temp["rawid"] = "{}_{}".format(
                rawid_list[-2],
                rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["sub_db_id"] = '99370'
            # Root-relative links are prefixed with the municipal site host.
            url_before = "http://www.shanwei.gov.cn" if url.startswith("/") else ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./table//td[contains(text(),"发布时间")]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shanweiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shanwei policy pages: no extra scheduling, just an empty DealModel."""
    return DealModel()


def policy_shanweiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanwei (汕尾) policy article pages.

    Extracts metadata and the full-text HTML fragment from the fetched page
    and schedules inserts into ``policy_latest`` / ``policy_fulltext_latest``
    plus an ``other_dicts`` (attachments) update on the source row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the page HTML and ``sql_model.article_json`` the list-stage
        metadata (url / title / pub_date).
    :return: EtlDealModel carrying ``save_data`` rows and a before-table update.
    :raises Exception: when no known full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: page-specific headers first, then the list-stage title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="title_cen mar-t2 text"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata table: header cells are matched character-by-character on the
    # Chinese label (e.g. "文" + "号" -> document number) to tolerate markup noise.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    # NOTE(review): extracted but never written into `data` below — confirm
    # whether legal status should be persisted.
    legal_status = ''.join(res.xpath(
        '//td[contains(string(),"时") and contains(string(),"效")]/following-sibling::td[1]//text()').extract()).strip()

    # Issuing organ: metadata table, then the xxgkfbjg span (portal name is
    # normalized to the office name), then a best-effort guess from the title.
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()
    if not organ:
        organ = ''.join(res.xpath('//span[@id="xxgkfbjg"]/text()').extract()).strip()
        organ = organ.replace('门户网站', '办公室')
    if not organ:
        organ_info = re.findall(r'市.{1,10}?(局|委员会)', title)
        organ = organ_info[0] if organ_info else ''
    if organ.startswith('市'):
        organ = '汕尾' + organ

    # Full text: try each known container in turn; keep the matching xpath
    # because it is reused below for attachment discovery. (Fix: the last
    # fallback now extracts inside its branch instead of unconditionally
    # re-running the xpath after the chain.)
    fulltext_xpath = '//div[@class="m-con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        fulltext_xpath = '//div[@class="article-content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fix: previously a bare `raise Exception` with no context.
        raise Exception(f"shanwei etl: fulltext container not found for {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99370'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHANWEI"
    zt_provider = "shanweigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (if any) is stored back onto the source row as JSON.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省东莞市
def policy_dglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Dongguan (东莞) JSON article listings.

    From page 1 it enqueues every remaining list page (100 items per page);
    for each article entry it builds the next-stage insert row carrying the
    rawid, sub_db_id and a JSON blob of url/title/pub_date.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_row = {"task_name": callmodel.sql_model.task_name,
                "task_tag": callmodel.sql_model.task_tag,
                "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    page_total = math.ceil(int(payload["total"]) / 100)
    current_page = int(callmodel.sql_model.page_index)

    if current_page == 1:
        # Only the first page schedules the remaining list pages.
        row = deal_sql_dict(callmodel.sql_model.dict())
        pending = DealInsertModel()
        pending.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(current_page + 1, page_total + 1):
            row["page"] = page_total
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": f"{page_info}"}, ensure_ascii=False)
            pending.lists.append(row.copy())
        result.befor_dicts.insert.append(pending)

    nxt = DealInsertModel()
    nxt.insert_pre = CoreSqlValue.insert_ig_it
    for entry in payload["articles"]:
        url = entry["url"]
        if url is None or 'htm' not in url:
            continue
        row = base_row.copy()
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = "<parent-dir>_<basename-without-extension>"
        parts = url.split('/')
        row["rawid"] = "{}_{}".format(
            parts[-2], parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        row["sub_db_id"] = '99371'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": entry["title"].strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        nxt.lists.append(row)
    result.next_dicts.insert.append(nxt)
    return result


def policy_dglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page parser for Dongguan (东莞) HTML policy lists.

    Page 1 derives the total page count from the "last page" link and
    schedules the remaining list pages; every page emits one next-stage
    article task per list item.

    Fixes: the "last page" link may be absent or may not contain
    ``index_N.html`` — previously ``re.sub(None)`` raised ``TypeError`` and
    a no-match left a non-numeric string for ``int()``; a list item with no
    resolvable title no longer crashes on ``title.strip()``.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: DealModel with page fan-out inserts and article-task inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total pages come from the "last page" href, e.g. ".../index_12.html".
        # Default to a single page when the link is missing or unparseable.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        page_match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(page_match.group(1)) if page_match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="fl lbkj"]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="listu"]/li')
        url_path = './/div[@class="tt"]/a/@href'
        title_path = './/div[@class="tt"]/a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.xpath(url_path).get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue  # no link at all -> nothing to crawl
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath(".//div[@class='tt']/a/text()").get()
            if title is None:
                title = ""  # keep the item; the article ETL re-extracts titles
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99371'
            # Root-relative links need the site origin prepended.
            url_before = "http://www.dg.gov.cn" if url.startswith("/") else ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('.//div[contains(@class, "xxly")]//span[contains(@class,"time")]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Dongguan; parsing happens in the ETL step."""
    return DealModel()


def policy_dgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL parser for Dongguan (东莞) policy article pages.

    Extracts the title, document metadata (pub_no, index_no, keyword,
    subject, written_date, organ) and the full text from the fetched HTML,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and writes
    attachment info back onto the source row via ``other_dicts``.

    Fixes: the bare ``raise Exception`` now carries a message, the unused
    ``legal_status`` extraction was dropped, and the full text is extracted
    once per candidate container instead of twice.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: EtlDealModel with ``save_data`` and a before-update for the row.
    :raises Exception: when no known full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page's own layouts first, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="title_cen mar-t2 text"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(chars):
        # Value of the metadata table cell whose label contains every
        # character in *chars*; builds the same xpath the site layouts use.
        cond = ' and '.join('contains(string(),"{}")'.format(c) for c in chars)
        xpath = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _meta('文号')
    index_no = _meta('索引')
    keyword = _meta('主词')
    subject = _meta('分类')
    written_date = _meta('成文日')
    organ = _meta('发机构')

    # Labels like "市人民政府" omit the city; prepend it.
    if organ.startswith('市'):
        organ = '东莞' + organ

    # Known full-text containers, preferred layout first.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="m-con"]', '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception('no full-text container matched: {}'.format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99371'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DG"
    zt_provider = "dggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Zhongshan, Guangdong Province (广东省中山市)
def policy_zslist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page parser for Zhongshan (中山) policy lists served as JSON.

    Page 1 schedules the remaining list pages (100 items per page); every
    page turns each article entry into a next-stage crawl task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(int(all_data["total"]) / 100)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans the remaining list pages out.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in all_data["articles"]:
            url = item["url"]
            # Only keep entries that point at a static page.
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            leaf = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = "{}_{}".format(parts[-2], leaf)
            temp["sub_db_id"] = '99372'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": item["title"].strip(),
                "pub_date": clean_pubdate(
                    item["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zslist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page parser for Zhongshan (中山) HTML policy lists.

    Page 1 derives the total page count from the "last page" link and
    schedules the remaining list pages; every page emits one next-stage
    article task per list item.

    Fixes: the "last page" link may be absent or may not contain
    ``index_N.html`` — previously ``re.sub(None)`` raised ``TypeError`` and
    a no-match left a non-numeric string for ``int()``; a list item with no
    resolvable title no longer crashes on ``title.strip()``.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: DealModel with page fan-out inserts and article-task inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total pages come from the "last page" href, e.g. ".../index_12.html".
        # Default to a single page when the link is missing or unparseable.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        page_match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(page_match.group(1)) if page_match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[@id="recordlist"]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="listu"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.xpath(url_path).get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue  # no link at all -> nothing to crawl
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if title is None:
                title = ""  # keep the item; the article ETL re-extracts titles
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99372'
            # Root-relative links need the site origin prepended.
            url_before = "http://www.zs.gov.cn" if url.startswith("/") else ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span[2]/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Zhongshan; parsing happens in the ETL step."""
    return DealModel()


def policy_zsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL parser for Zhongshan (中山) policy article pages.

    Extracts the title, document metadata (pub_no, index_no, keyword,
    subject, written_date, organ) and the full text from the fetched HTML,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and writes
    attachment info back onto the source row via ``other_dicts``.

    Fixes: the bare ``raise Exception`` now carries a message, the unused
    ``legal_status`` extraction was dropped, and the full text is extracted
    once per candidate container instead of twice.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: EtlDealModel with ``save_data`` and a before-update for the row.
    :raises Exception: when no known full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page's own layouts first, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//ucaptitle//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(chars):
        # Value of the metadata table cell whose label contains every
        # character in *chars*; builds the same xpath the site layouts use.
        cond = ' and '.join('contains(string(),"{}")'.format(c) for c in chars)
        xpath = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _meta('文号')
    index_no = _meta('索引')
    keyword = _meta('主词')
    subject = _meta('分类')
    written_date = _meta('成文日')
    organ = _meta('发机构')

    # Labels like "市人民政府" omit the city; prepend it.
    if organ.startswith('市'):
        organ = '中山' + organ

    # Known full-text containers, preferred layout first.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="xs_cnt"]', '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception('no full-text container matched: {}'.format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99372'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZS"
    zt_provider = "zsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Jiangmen, Guangdong Province (广东省江门市)
def policy_jiangmenlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page parser for Jiangmen (江门) policy lists served as JSON.

    Page 1 schedules the remaining list pages (100 items per page); every
    page turns each article entry into a next-stage crawl task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        all_data = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(int(all_data["total"]) / 100)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans the remaining list pages out.
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                                   ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for item in all_data["articles"]:
            url = item["url"]
            # Only keep entries that point at a static page.
            if url is None or 'htm' not in url:
                continue
            parts = url.split('/')
            leaf = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            temp["rawid"] = "{}_{}".format(parts[-2], leaf)
            temp["sub_db_id"] = '99373'
            article_json = {
                "url": url.replace("../", "/").replace("./", "/"),
                "title": item["title"].strip(),
                "pub_date": clean_pubdate(
                    item["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiangmenlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page parser for Jiangmen (江门) HTML policy lists.

    Page 1 derives the total page count from the "last page" link and
    schedules the remaining list pages; every page emits one next-stage
    article task per list item.

    Fixes: a missing "last page" link previously crashed
    ``re.findall(pattern, None)`` with ``TypeError``; a list item with no
    resolvable title no longer crashes on ``title.strip()``.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: DealModel with page fan-out inserts and article-task inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Total pages come from the "last page" href, e.g.
            # ".../index_12.html"; default to one page when absent.
            page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
            page_info = re.findall(r"index_(\d+)\.html", page_info_before or "")
            total_page = int(page_info[0]) if page_info else 1
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"infoList")]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="listu"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li.xpath(url_path).get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue  # no link at all -> nothing to crawl
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if title is None:
                title = ""  # keep the item; the article ETL re-extracts titles
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99373'
            # Root-relative links need the site origin prepended.
            url_before = "http://www.jiangmen.gov.cn" if url.startswith("/") else ""
            article_json = dict()
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiangmenarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage hook for Jiangmen; parsing happens in the ETL step."""
    return DealModel()


def policy_jiangmenarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL parser for Jiangmen (江门) policy article pages.

    Extracts the title, document metadata (pub_no, index_no, keyword,
    subject, written_date, organ) and the full text from the fetched HTML,
    builds rows for ``policy_latest`` / ``policy_fulltext_latest`` and writes
    attachment info back onto the source row via ``other_dicts``.

    Fixes: the bare ``raise Exception`` now carries a message and the unused
    ``legal_status`` extraction was dropped.

    :param callmodel: callback model carrying the fetched page and DB row.
    :return: EtlDealModel with ``save_data`` and a before-update for the row.
    :raises Exception: when no known full-text container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the page's own layouts first, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xs_title"]/text()').extract()).strip()
    if '\n' in title:
        # A multi-line h1 usually means layout junk leaked in; prefer the meta tag.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(chars):
        # Value of the metadata table cell whose label contains every
        # character in *chars*; builds the same xpath the site layouts use.
        cond = ' and '.join('contains(string(),"{}")'.format(c) for c in chars)
        xpath = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xpath).extract()).strip()

    pub_no = _meta('文号')
    index_no = _meta('索引')
    keyword = _meta('主词')
    subject = _meta('分类')
    written_date = _meta('成文日')
    organ = _meta('发机构')

    # Labels like "市人民政府" omit the city; prepend it.
    if organ.startswith('市'):
        organ = '江门' + organ

    # Known full-text containers, preferred layout first.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="xs_cnt"]', '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception('no full-text container matched: {}'.format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99373'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIANGMEN"
    zt_provider = "jiangmengovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Yangjiang, Guangdong Province (广东省阳江市)

def policy_yangjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yangjiang (广东省阳江市) government policy documents.

    Parses the list-page HTML, derives the total page count from the
    "last page" pagination link, schedules the remaining list pages
    (only while processing page 1) and queues one article task per entry.

    Args:
        callmodel: framework callback context carrying the fetched HTML
            (``para_dicts["data"]["1_1"]["html"]``) and the source SQL row.

    Returns:
        DealModel with the page fan-out in ``befor_dicts`` and the
        per-article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The "last page" link looks like ".../index_<N>.html" where N is the
        # total page count.  Guard against a missing link (extract_first()
        # returns None) or an unexpected href so a layout change degrades to a
        # single page instead of crashing in re.sub()/int().
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list-page tasks, so re-runs of
            # later pages do not duplicate the schedule.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[contains(@class,"ty_content_1_co_r_co_1")]/ul/li')
        if not li_list:
            # Alternative list markup used by some sections of the site.
            li_list = res.xpath('//ul[@class="listu"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # rawid = "<parent dir>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99374'
            # Site-relative links need the host prefix; absolute links keep it.
            url_before = "http://www.yangjiang.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="date"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yangjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yangjiang: produces no further work."""
    return DealModel()


def policy_yangjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yangjiang (广东省阳江市) policy article pages.

    Extracts title, metadata and fulltext from the detail-page HTML, builds
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info found in the fulltext node back onto the source row.

    Raises:
        Exception: when no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the <td> that follows a label <td> containing all *chars*.
        # Labels are matched character by character because the sites insert
        # arbitrary whitespace/markup between the label characters.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xl_con_1_bo_1_ti"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Pages often abbreviate the issuing organ to "市..."; restore the city.
    if organ.startswith('市'):
        organ = '阳江' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="xs_cnt"]',
                           '//div[@id="xl_co"]',
                           '//div[@id="article-content"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99374'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YANGJIANG"
    zt_provider = "yangjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment info is persisted on the source row as a JSON blob.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省湛江市
def policy_zhanjianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Zhanjiang (广东省湛江市) backed by a JSON list API.

    Reads the JSON payload, fans out the remaining list pages (page 1 only,
    100 entries per page) and queues one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API pages 100 entries at a time.
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the rest of the list pages.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                               ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for entry in all_data["articles"]:
        task = info_dicts.copy()
        task["task_tag"] = task.pop("task_tag_next")
        url = entry["url"]
        title = entry["title"]
        if url is None or 'htm' not in url:
            continue
        # rawid = "<parent dir>_<file name without extension>".
        parts = url.split('/')
        stem = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task["rawid"] = "{}_{}".format(parts[-2], stem)
        task["sub_db_id"] = '99375'
        pub_date_raw = entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(pub_date_raw),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhanjianglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for Zhanjiang (广东省湛江市) policy documents.

    Derives the total page count from the "last page" pagination link,
    schedules the remaining list pages (only while processing page 1) and
    queues one article task per list entry.

    Returns:
        DealModel with the page fan-out in ``befor_dicts`` and the
        per-article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # ".../index_<N>.html" on the "last page" link gives the page count.
        # Guard against a missing link or unexpected href so the callback
        # degrades to a single page instead of crashing in re.sub()/int().
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"infoList")]/li')
        if not li_list:
            # Alternative list markup used by some sections of the site.
            li_list = res.xpath('//ul[@class="list"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # rawid = "<parent dir>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99375'
            # Site-relative links need the host prefix; absolute links keep it.
            url_before = "http://www.zhanjiang.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhanjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhanjiang: produces no further work."""
    return DealModel()


def policy_zhanjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhanjiang (广东省湛江市) policy article pages.

    Extracts title, metadata and fulltext from the detail-page HTML, builds
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info found in the fulltext node back onto the source row.

    Raises:
        Exception: when no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the <td> that follows a label <td> containing all *chars*.
        # Labels are matched character by character because the sites insert
        # arbitrary whitespace/markup between the label characters.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="xs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Pages often abbreviate the issuing organ to "市..."; restore the city.
    if organ.startswith('市'):
        organ = '湛江' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="article"]',
                           '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99375'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZHANJIANG"
    zt_provider = "zhanjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment info is persisted on the source row as a JSON blob.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省茂名市
def policy_maominglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Maoming (广东省茂名市) backed by a JSON list API.

    Reads the JSON payload, fans out the remaining list pages (page 1 only,
    100 entries per page) and queues one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    all_data = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API pages 100 entries at a time.
    total_page = math.ceil(int(all_data["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the rest of the list pages.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                               ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for entry in all_data["articles"]:
        task = info_dicts.copy()
        task["task_tag"] = task.pop("task_tag_next")
        url = entry["url"]
        title = entry["title"]
        if url is None or 'htm' not in url:
            continue
        # rawid = "<parent dir>_<file name without extension>".
        parts = url.split('/')
        stem = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task["rawid"] = "{}_{}".format(parts[-2], stem)
        task["sub_db_id"] = '99376'
        pub_date_raw = entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(pub_date_raw),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_maominglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-page callback for Maoming (广东省茂名市) policy documents.

    Derives the total page count from the "last page" pagination link,
    schedules the remaining list pages (only while processing page 1) and
    queues one article task per list entry.

    Returns:
        DealModel with the page fan-out in ``befor_dicts`` and the
        per-article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # ".../index_<N>.html" on the "last page" link gives the page count.
        # Guard against a missing link or unexpected href so the callback
        # degrades to a single page instead of crashing in re.sub()/int().
        page_info_before = res.xpath("//a[@class='last up']/@href").extract_first()
        match = re.search(r"index_(\d+)\.html", page_info_before) if page_info_before else None
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//ul[contains(@class,"GsTL5")]/li')
        if not li_list:
            # Alternative list markup used by some sections of the site.
            li_list = res.xpath('//ul[@class="list"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # rawid = "<parent dir>_<file name without extension>".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99376'
            # Site-relative links need the host prefix; absolute links keep it.
            url_before = "http://www.maoming.gov.cn" if url.startswith("/") else ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_maomingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Maoming: produces no further work."""
    return DealModel()


def policy_maomingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Maoming (广东省茂名市) policy article pages.

    Extracts title, metadata and fulltext from the detail-page HTML, builds
    rows for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info found in the fulltext node back onto the source row.

    Raises:
        Exception: when no known fulltext container is present in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the <td> that follows a label <td> containing all *chars*.
        # Labels are matched character by character because the sites insert
        # arbitrary whitespace/markup between the label characters.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Pages often abbreviate the issuing organ to "市..."; restore the city.
    if organ.startswith('市'):
        organ = '茂名' + organ

    # Try the known fulltext containers in order; remember the matching xpath
    # for the attachment scan below.
    fulltext = None
    fulltext_xpath = ''
    for fulltext_xpath in ('//div[@class="article"]',
                           '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99376'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MAOMING"
    zt_provider = "maominggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data
    # Attachment info is persisted on the source row as a JSON blob.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省肇庆市
def policy_zhaoqinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Zhaoqing (广东省肇庆市) JSON article API.

    Parses the JSON payload fetched under key "1_1"; on the first page it
    fans out tasks for the remaining result pages, then emits one
    next-stage (article) task per usable list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {
        "task_name": callmodel.sql_model.task_name,
        "task_tag": callmodel.sql_model.task_tag,
        "task_tag_next": task_info.task_tag_next,
    }
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API serves 100 articles per page.
    total_page = math.ceil(int(payload["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the rest of the list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            # page_info stays constant; the page number travels in page_index.
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                               ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for li in payload["articles"]:
        url = li["url"]
        title = li["title"]
        # Skip entries without a usable html-style detail URL.
        if url is None or 'htm' not in url:
            continue
        temp = base_info.copy()
        temp["task_tag"] = temp.pop("task_tag_next")
        parts = url.split('/')
        # rawid = parent directory + basename minus its html-ish extension.
        temp["rawid"] = "{}_{}".format(
            parts[-2],
            parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        temp["sub_db_id"] = '99377'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                li["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhaoqinglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """HTML list-stage callback for Zhaoqing (广东省肇庆市) policy pages.

    The site's pagination widget never serves more than 30 pages, so the
    first page unconditionally schedules pages 2..30; every list row then
    becomes a next-stage article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {
        "task_name": callmodel.sql_model.task_name,
        "task_tag": callmodel.sql_model.task_tag,
        "task_tag_next": task_info.task_tag_next,
    }
    if "1_1" not in para_dicts["data"]:
        return result

    # 根据网页函数显示, 最多只会提供30页 -- site-side hard cap on pagination.
    total_page = 30
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(
                {"page_info": f"{page_info}_{page}"}, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=para_dicts["data"]["1_1"]['html'])
    # Primary list layout, with a legacy fallback selector.
    li_list = (res.xpath('//ul[contains(@class,"GsTL5")]/li')
               or res.xpath('//ul[@class="comlist"]/li'))
    for li in li_list:
        url = li.xpath('./a/@href').get()
        title = li.xpath('./a/@title').get()
        if title is None:
            title = li.xpath("./a/text()").get()
        if url is None:
            url = li.xpath(".//a/@href").get()
            if url is None:
                continue
        temp = base_info.copy()
        temp["task_tag"] = temp.pop("task_tag_next")
        parts = url.split('/')
        # rawid = parent directory + basename minus its html-ish extension.
        temp["rawid"] = "{}_{}".format(
            parts[-2],
            parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        temp["sub_db_id"] = '99377'
        # Root-relative links need the site host prepended.
        prefix = "http://www.zhaoqing.gov.cn" if url.startswith("/") else ""
        pub_date_raw = (li.xpath('./span[@class="time"]/text()').get()
                        or li.xpath("./span/text()").get()
                        or "")
        article_json = {
            "url": prefix + url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                pub_date_raw.replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhaoqingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Zhaoqing: intentionally a no-op — extraction
    happens in the ETL callback, so an empty DealModel is returned."""
    return DealModel()


def policy_zhaoqingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhaoqing (广东省肇庆市) policy article pages.

    Extracts the title, document metadata and full text from the fetched
    HTML, builds rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the metadata table cell whose label <td> contains all of
        # *chars* (e.g. "文", "号" -> document number). Generates the same
        # xpaths the block previously spelled out six times.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    # Title: page heading variants first, then the value carried over from
    # the list stage as a last resort.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Bare "市..." organ names are qualified with the city name.
    if organ.startswith('市'):
        organ = '肇庆' + organ

    fulltext_xpath = '//div[contains(@class,"article-content")]|//div[@class="Custom_UnionStyle"]'
    fulltext = ''.join(res.xpath(fulltext_xpath).extract())
    if not fulltext:
        # BUGFIX: was a message-less `raise Exception`.
        raise Exception("fulltext not found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99377'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZHAOQING"
    zt_provider = "zhaoqinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (found inside the full-text container) is written back
    # onto the source task row; "{}" marks "no attachments".
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省清远市

def policy_gdqylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Qingyuan (广东省清远市) policy pages.

    Derives the page count from the "last page" pagination link, schedules
    the remaining pages when handling page 1, and emits one next-stage
    article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        # BUGFIX: extract_first() returns None when there is no pagination
        # link, which made re.sub raise TypeError; and int() crashed when the
        # href did not match the index_N.html pattern. Fall back to 1 page.
        match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Primary list layout, with a fallback selector for older pages.
        li_list = res.xpath('//div[contains(@class,"pageList")]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="list"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            rawid_list = url.split('/')
            # rawid = parent directory + basename minus its html-ish extension.
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99378'
            # Root-relative links need the site host prepended.
            if url.startswith("/"):
                url_before = "http://www.gdqy.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gdqyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Qingyuan: intentionally a no-op — extraction
    happens in the ETL callback, so an empty DealModel is returned."""
    return DealModel()


def policy_gdqyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qingyuan (广东省清远市) policy article pages.

    Extracts the title, document metadata and full text from the fetched
    HTML, builds rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the metadata table cell whose label <td> contains all of
        # *chars* (e.g. "文", "号" -> document number). Generates the same
        # xpaths the block previously spelled out six times.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    # Title: page heading variants first, then the value carried over from
    # the list stage as a last resort.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Bare "市..." organ names are qualified with the city name.
    if organ.startswith('市'):
        organ = '清远' + organ

    # Try the known full-text containers in order, keeping the xpath that
    # matched so attachment links are searched in the same container.
    # (Replaces the original's redundant unconditional re-extraction and
    # its message-less `raise Exception`.)
    fulltext = None
    fulltext_xpath = '//div[@class="article"]'
    for fulltext_xpath in ('//div[@class="article"]',
                           '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("fulltext not found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99378'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GDQY"
    zt_provider = "gdqygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (found inside the full-text container) is written back
    # onto the source task row; "{}" marks "no attachments".
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省潮州市

def policy_chaozhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Chaozhou (广东省潮州市) policy pages.

    Derives the page count from the "last page" pagination link, schedules
    the remaining pages when handling page 1, and emits one next-stage
    article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        # BUGFIX: extract_first() returns None when there is no pagination
        # link, which made re.sub raise TypeError; and int() crashed when the
        # href did not match the index_N.html pattern. Fall back to 1 page.
        match = re.search(r"index_(\d+)\.html", page_info_before or "")
        total_page = int(match.group(1)) if match else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Primary list layout, with a fallback selector for older pages.
        li_list = res.xpath('//div[contains(@class,"pageList")]/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="ul_news"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            rawid_list = url.split('/')
            # rawid = parent directory + basename minus its html-ish extension.
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99379'
            # Root-relative links need the site host prepended.
            if url.startswith("/"):
                url_before = "http://www.chaozhou.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            article_json["title"] = title.strip()
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath(".//b/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_chaozhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch stage for Chaozhou: intentionally a no-op — extraction
    happens in the ETL callback, so an empty DealModel is returned."""
    return DealModel()


def policy_chaozhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chaozhou (广东省潮州市) policy article pages.

    Extracts the title, document metadata and full text from the fetched
    HTML, builds rows for ``policy_latest`` / ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when none of the known full-text containers is present.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)

    def _meta(*chars):
        # Value of the metadata table cell whose label <td> contains all of
        # *chars* (e.g. "文", "号" -> document number). Generates the same
        # xpaths the block previously spelled out six times.
        cond = " and ".join('contains(string(),"{}")'.format(c) for c in chars)
        xp = '//td[{}]/following-sibling::td[1]//text()'.format(cond)
        return ''.join(res.xpath(xp).extract()).strip()

    # Title: page heading variants first, then the value carried over from
    # the list stage as a last resort.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h1[@class="article-title"]/text()').extract()).strip()
    # An over-long "title" is usually a scraped paragraph: prefer the meta tag.
    if len(title) >= 100:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = _meta("文", "号")
    index_no = _meta("索", "引")
    keyword = _meta("主", "词")
    subject = _meta("分", "类")
    written_date = _meta("成", "文", "日")
    organ = _meta("发", "机", "构")

    # Bare "市..." organ names are qualified with the city name.
    if organ.startswith('市'):
        organ = '潮州' + organ

    # Try the known full-text containers in order, keeping the xpath that
    # matched so attachment links are searched in the same container.
    # (Replaces the original's redundant unconditional re-extraction and
    # its message-less `raise Exception`.)
    fulltext = None
    fulltext_xpath = '//div[@class="info"]'
    for fulltext_xpath in ('//div[@class="info"]',
                           '//div[@id="zoomcon"]',
                           '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("fulltext not found: {}".format(provider_url))

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99379'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHAOZHOU"
    zt_provider = "chaozhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info (found inside the full-text container) is written back
    # onto the source task row; "{}" marks "no attachments".
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省揭阳市
def policy_jieyanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Jieyang (广东省揭阳市) JSON article API.

    Parses the JSON payload fetched under key "1_1"; on the first page it
    fans out tasks for the remaining result pages, then emits one
    next-stage (article) task per usable list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {
        "task_name": callmodel.sql_model.task_name,
        "task_tag": callmodel.sql_model.task_tag,
        "task_tag_next": task_info.task_tag_next,
    }
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    # The API serves 100 articles per page.
    total_page = math.ceil(int(payload["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the rest of the list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            # page_info stays constant; the page number travels in page_index.
            sql_dict["list_json"] = json.dumps({"page_info": f"{page_info}"},
                                               ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for li in payload["articles"]:
        url = li["url"]
        title = li["title"]
        # Skip entries without a usable html-style detail URL.
        if url is None or 'htm' not in url:
            continue
        temp = base_info.copy()
        temp["task_tag"] = temp.pop("task_tag_next")
        parts = url.split('/')
        # rawid = parent directory + basename minus its html-ish extension.
        temp["rawid"] = "{}_{}".format(
            parts[-2],
            parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
        temp["sub_db_id"] = '99380'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": title.strip(),
            "pub_date": clean_pubdate(
                li["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_jieyanglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Jieyang (揭阳) HTML policy listings.

    On page 1 it derives the total page count from the "last page" link and
    schedules crawl tasks for pages 2..N (``befor_dicts``); on every page it
    extracts article links and queues one detail-page task per article
    (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total pages come from the "last page" href, e.g. ".../index_12.html"
        # -> 12.  The link can be absent (single-page list) and the href may
        # carry no page number, so default to 1 instead of letting
        # re.sub(None)/int() raise.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        total_page = 1
        if page_info_before:
            match = re.search(r"index_(\d+)\.html", page_info_before)
            if match:
                total_page = int(match.group(1))
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())  # copy: sql_dict is mutated per page
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[contains(@class,"list")]/ul/li')
        if not li_list:
            li_list = res.xpath('//ul[@class="list"]/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # rawid = parent directory + file stem,
            # e.g. ".../content/post_1.html" -> "content_post_1".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99380'
            if url.startswith("/"):
                url_before = "http://www.jieyang.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # A title can legitimately be absent (no @title and no anchor
            # text); store an empty string rather than crash on None.strip().
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jieyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Detail-page callback for Jieyang policies.

    Fetch-only stage: parsing is handled by the ETL callback, so this
    returns an empty DealModel.
    """
    return DealModel()


def policy_jieyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jieyang (揭阳) policy detail pages.

    Extracts the metadata grid (document number, index number, keywords,
    subject, written date, issuing organ) and the full text from the fetched
    HTML, builds the ``policy_latest`` / ``policy_fulltext_latest`` rows,
    and writes attachment info back onto the crawl task row.

    Raises:
        Exception: if none of the known fulltext containers is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the known page layouts; if the visible title is suspiciously
    # long (>= 50 chars, likely swallowed extra text) fall back to the
    # <meta ArticleTitle>; finally fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h3//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="tit"]/text()').extract()).strip()
    if len(title) >= 50:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells: value is the first <td> sibling after the labelled <td>.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()

    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '揭阳' + organ

    # Full text: first matching container wins; the winning xpath also
    # scopes the attachment scan below.
    fulltext = None
    fulltext_xpath = '//div[@class="article"]'
    for fulltext_xpath in ('//div[@class="article"]', '//div[@id="zoom"]', '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container matched for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99380'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIEYANG"
    zt_provider = "jieyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext container for attachments and persist their info
    # on the crawl task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# 广东省云浮市
def policy_yunfulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yunfu (云浮) JSON article listing.

    Page 1 fans out crawl tasks for the remaining list pages (100 entries
    per page); every page queues one detail-page task per article entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(int(payload["total"]) / 100)
    page_index = int(callmodel.sql_model.page_index)

    if page_index == 1:
        # Only the first page schedules the follow-up list pages.
        befor_model = DealInsertModel()
        befor_model.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        page_info = json.loads(callmodel.sql_model.list_json)["page_info"]
        for page_no in range(page_index + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = json.dumps({"page_info": str(page_info)}, ensure_ascii=False)
            befor_model.lists.append(row.copy())  # snapshot: row is reused
        result.befor_dicts.insert.append(befor_model)

    next_model = DealInsertModel()
    next_model.insert_pre = CoreSqlValue.insert_ig_it
    for entry in payload["articles"]:
        url = entry["url"]
        # Skip entries without a usable HTML link.
        if url is None or 'htm' not in url:
            continue
        task_row = info_dicts.copy()
        task_row["task_tag"] = task_row.pop("task_tag_next")
        # rawid = parent directory + file stem of the article URL.
        parts = url.split('/')
        stem = parts[-1].replace(".shtml", "").replace(".html", "").replace(".htm", "")
        task_row["rawid"] = "{}_{}".format(parts[-2], stem)
        task_row["sub_db_id"] = '99381'
        article_json = {
            "url": url.replace("../", "/").replace("./", "/"),
            "title": entry["title"].strip(),
            "pub_date": clean_pubdate(
                entry["created_at"].replace('发布时间：', '').replace('[', '').replace(']', '').strip()),
        }
        task_row["article_json"] = json.dumps(article_json, ensure_ascii=False)
        next_model.lists.append(task_row)
    result.next_dicts.insert.append(next_model)

    return result


def policy_yunfulist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yunfu (云浮) HTML policy listings.

    On page 1 it derives the total page count from the "last page" link and
    schedules crawl tasks for pages 2..N (``befor_dicts``); on every page it
    extracts article links and queues one detail-page task per article
    (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Total pages come from the "last page" href, e.g. ".../index_12.html"
        # -> 12.  The link can be absent (single-page list) and the href may
        # carry no page number, so default to 1 instead of letting
        # re.sub(None)/int() raise.
        page_info_before = res.xpath("//a[@class='last']/@href").extract_first()
        total_page = 1
        if page_info_before:
            match = re.search(r"index_(\d+)\.html", page_info_before)
            if match:
                total_page = int(match.group(1))
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            page_info = list_json["page_info"]
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page_info}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())  # copy: sql_dict is mutated per page
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = res.xpath('//div[@class="ny_right_list"]/li')
        if not li_list:
            li_list = res.xpath('//div[@class="zcgknews_list"]/ul/li')
        url_path = './a/@href'
        title_path = './a/@title'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li.xpath(url_path).get()
            title = li.xpath(title_path).get()
            if title is None:
                title = li.xpath("./a/text()").get()
            if url is None:
                url = li.xpath(".//a/@href").get()
                if url is None:
                    continue
            # rawid = parent directory + file stem,
            # e.g. ".../content/post_1.html" -> "content_post_1".
            rawid_list = url.split('/')
            rawid = "{}_{}".format(rawid_list[-2],
                                   rawid_list[-1].replace(".shtml", "").replace(".html", "").replace(".htm", ""))
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99381'
            if url.startswith("/"):
                url_before = "http://www.yunfu.gov.cn"
            else:
                url_before = ""
            article_json["url"] = url_before + url.replace("../", "/").replace("./", "/")
            # A title can legitimately be absent (no @title and no anchor
            # text); store an empty string rather than crash on None.strip().
            article_json["title"] = title.strip() if title else ""
            pub_date_before = li.xpath('./span[@class="time"]/text()').get()
            if not pub_date_before:
                pub_date_before = li.xpath("./span/text()").get()
            if not pub_date_before:
                pub_date_before = ""
            article_json["pub_date"] = clean_pubdate(
                pub_date_before.replace('发布时间：', '').replace('[', '').replace(']', '').strip())
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yunfuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Detail-page callback for Yunfu policies.

    Fetch-only stage: parsing is handled by the ETL callback, so this
    returns an empty DealModel.
    """
    return DealModel()


def policy_yunfuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunfu (云浮) policy detail pages.

    Extracts the metadata grid (document number, index number, keywords,
    subject, written date, issuing organ) and the full text from the fetched
    HTML, builds the ``policy_latest`` / ``policy_fulltext_latest`` rows,
    and writes attachment info back onto the crawl task row.

    Raises:
        Exception: if none of the known fulltext containers is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]

    res = Selector(text=html)
    # Title: try the known page layouts, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="title document-number"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="ny_min_txt"]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="con_contitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells: value is the first <td> sibling after the labelled <td>.
    pub_no = ''.join(res.xpath(
        '//td[contains(string(),"文") and contains(string(),"号")]/following-sibling::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath(
        '//td[contains(string(),"索") and contains(string(),"引")]/following-sibling::td[1]//text()').extract()).strip()
    keyword = ''.join(res.xpath(
        '//td[contains(string(),"主") and contains(string(),"词")]/following-sibling::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath(
        '//td[contains(string(),"分") and contains(string(),"类")]/following-sibling::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath(
        '//td[contains(string(),"成") and contains(string(),"文") and contains(string(),"日")]/following-sibling::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath(
        '//td[contains(string(),"发") and contains(string(),"机") and contains(string(),"构")]/following-sibling::td[1]//text()').extract()).strip()

    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '云浮' + organ

    # Full text: first matching container wins; the winning xpath also
    # scopes the attachment scan below.
    fulltext = None
    fulltext_xpath = '//div[@id="zoomcon"]'
    for fulltext_xpath in ('//div[@id="zoomcon"]', '//div[@id="Zoom"]', '//div[@class="article-content"]'):
        fulltext = res.xpath(fulltext_xpath).extract_first()
        if fulltext:
            break
    if not fulltext:
        raise Exception("no fulltext container matched for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99381'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YUNFU"
    zt_provider = "yunfugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['subject'] = subject
    data['keyword'] = keyword
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext container for attachments and persist their info
    # on the crawl task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_hefeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Hefei (合肥) policy detail pages.

    Reads metadata out of the <table class="... hidden-sm ..."> label/value
    grid and emits ``policy_latest`` / ``policy_fulltext_latest`` rows plus
    attachment info for the crawl task row.

    Raises:
        Exception: if the fulltext container is not found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Metadata grid: each value is the first <td> following its <th> label.
    th = '//table[contains(@class,"hidden-sm")]//th'

    def _meta(pred):
        # Text of the <td> right after the <th> matching the label predicate.
        return ''.join(res.xpath(th + '[' + pred + ']/following::td[1]/text()').extract()).strip()

    pub_no = _meta('contains(text(),"文") and contains(text(),"号")')
    index_no = _meta('text()="索"')
    subject = _meta('contains(text(),"信息分类")')
    subject_word = _meta('text()="关"')
    organ = _meta('contains(text(),"发布机构")')
    legal_status = _meta('text()="有"')
    written_date = _meta('contains(text(),"成文日期")')
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '合肥' + organ
    fulltext_xpath = '//div[contains(@class,"j-fontContent")]|//div[@class="gzk-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container matched for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99301'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEFEI"
    zt_provider = "hefeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext container for attachments and persist their info
    # on the crawl task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_wuhuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Wuhu (芜湖) policy detail pages.

    The page carries its metadata in one of two table layouts
    (``table_suoyin`` or ``hidden-sm``); the field labels are identical, so
    only the table selector differs.  Emits ``policy_latest`` /
    ``policy_fulltext_latest`` rows plus attachment info for the crawl task
    row.

    Raises:
        Exception: if the fulltext container is not found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the <meta ArticleTitle> tag; fall back to the list-page title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Two known table layouts with identical labels — pick the selector once
    # instead of duplicating every field extraction.
    if 'class="table_suoyin"' in html:
        th = '//table[@class="table_suoyin"]//th'
    else:
        th = '//table[contains(@class,"hidden-sm")]//th'

    def _meta(pred):
        # Text of the <td> right after the <th> matching the label predicate.
        return ''.join(res.xpath(th + '[' + pred + ']/following::td[1]/text()').extract()).strip()

    pub_no = _meta('contains(text(),"文") and contains(text(),"号")')
    index_no = _meta('text()="索"')
    subject = _meta('contains(text(),"信息分类")')
    subject_word = _meta('text()="关"')
    organ = _meta('contains(text(),"发布机构")')
    legal_status = _meta('text()="有"')
    written_date = _meta('contains(text(),"成文日期")')
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '芜湖' + organ
    fulltext_xpath = '//div[contains(@class,"j-fontContent")]|//div[@class="gzk-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container matched for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99302'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WUHU"
    zt_provider = "wuhugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext container for attachments and persist their info
    # on the crawl task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_bengbuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Bengbu (蚌埠) policy detail pages.

    Reads metadata out of the <table class="table_suoyin"> label/value grid,
    falls back to the on-page publish date when the list page carried none,
    and emits ``policy_latest`` / ``policy_fulltext_latest`` rows plus
    attachment info for the crawl task row.

    Raises:
        Exception: if no publish date can be determined, or the fulltext
            container is not found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    if not pub_date:
        # Fall back to the publish date shown on the article page itself.
        pub_date_info = ''.join(res.xpath('//span[@class="fbsj sp"]//text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception("no pub_date for url %s" % provider_url)

    # Metadata grid: each value is the first <td> following its <th> label.
    th = '//table[@class="table_suoyin"]//th'

    def _meta(pred):
        # Text of the <td> right after the <th> matching the label predicate.
        return ''.join(res.xpath(th + '[' + pred + ']/following::td[1]/text()').extract()).strip()

    pub_no = _meta('contains(text(),"发文字号")')
    index_no = _meta('text()="索"')
    subject = _meta('contains(text(),"信息分类")')
    subject_word = _meta('text()="关"')
    organ = _meta('contains(text(),"发文机关")')
    legal_status = _meta('text()="有"')
    written_date = _meta('contains(text(),"成文日期")')
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '蚌埠' + organ
    fulltext_xpath = '//div[contains(@class,"j-fontContent")]|//div[@class="gzk-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("no fulltext container matched for url %s" % provider_url)

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99303'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BENGBU"
    zt_provider = "bengbugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Scan the fulltext container for attachments and persist their info
    # on the crawl task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_tlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tongling (铜陵) municipal government policy articles.

    Parses the crawled article HTML from ``callmodel.para_dicts``, extracts the
    policy metadata (title, document number, index number, subject, issuing
    organ, legal status, written date) and the full-text container, and returns
    an :class:`EtlDealModel` whose ``save_data`` targets the ``policy_latest``
    and ``policy_fulltext_latest`` tables.  Attachment info discovered inside
    the full text is written back into the source row's ``other_dicts``.

    Args:
        callmodel: platform callback model carrying ``para_dicts`` (fetched
            pages) and ``sql_model`` (task row with ``rawid`` /
            ``article_json`` / ``task_tag`` / ``task_name``).

    Returns:
        EtlDealModel: populated ``save_data`` and one ``DealUpdateModel`` in
        ``befor_dicts.update_list``.

    Raises:
        Exception: if the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # list-page title captured at crawl time.
    title = ''.join(res.xpath('//div[@id="title"]//text()|//div[@class="detail_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="table"]//b[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the organ to e.g. "市人民政府"; qualify it with the city.
    if organ.startswith('市'):
        organ = '铜陵' + organ
    fulltext_xpath = '//div[@id="zoom"]|//div[contains(@class,"downmainbox")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'tl article fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99307'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TL"
    zt_provider = "tlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment info (files linked inside the full text) back on
    # the source row so the downloader stage can pick it up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_jdzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jingdezhen (景德镇) municipal government policy articles.

    Parses the crawled article HTML from ``callmodel.para_dicts``, extracts the
    policy metadata from the ``xl-tab1`` info box (document number, index
    number, category, issuing organ, validity) and the full-text container,
    and returns an :class:`EtlDealModel` targeting the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info found in the full text
    is written back into the source row's ``other_dicts``.

    Args:
        callmodel: platform callback model carrying ``para_dicts`` (fetched
            pages) and ``sql_model`` (task row with ``rawid`` /
            ``article_json`` / ``task_tag`` / ``task_name``).

    Returns:
        EtlDealModel: populated ``save_data`` and one ``DealUpdateModel`` in
        ``befor_dicts.update_list``.

    Raises:
        Exception: if the full-text container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title comes straight from the list page; strip whitespace for
    # consistency with the sibling city callbacks.
    title = article_json['title'].strip()
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    pub_no = ''.join(res.xpath('//div[contains(@class,"xl-tab1")]//strong[contains(text(),"文号")]/parent::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"xl-tab1")]//strong[contains(text(),"索取号")]/parent::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"xl-tab1")]//strong[contains(text(),"文件类别")]/parent::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"xl-tab1")]//strong[contains(text(),"发布机构")]/parent::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"xl-tab1")]//strong[contains(text(),"有效期")]/parent::span[1]/text()').extract()).strip()
    # The site abbreviates the organ to e.g. "市人民政府"; qualify it with the city.
    if organ.startswith('市'):
        organ = '景德镇' + organ
    fulltext_xpath = '//div[contains(@class,"content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'jdz article fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99347'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JDZ"
    zt_provider = "jdzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment info (files linked inside the full text) back on
    # the source row so the downloader stage can pick it up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_pingxiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Pingxiang (萍乡) municipal government policy articles.

    Parses the crawled article HTML from ``callmodel.para_dicts``, extracts the
    title, publication date (成文日期, falling back to 发布日期), document
    number, index number, issuing organ and the full-text container, and
    returns an :class:`EtlDealModel` targeting the ``policy_latest`` and
    ``policy_fulltext_latest`` tables.  Attachment info found in the full text
    is written back into the source row's ``other_dicts``.

    Args:
        callmodel: platform callback model carrying ``para_dicts`` (fetched
            pages) and ``sql_model`` (task row with ``rawid`` /
            ``article_json`` / ``task_tag`` / ``task_name``).

    Returns:
        EtlDealModel: populated ``save_data`` and one ``DealUpdateModel`` in
        ``befor_dicts.update_list``.

    Raises:
        Exception: if the title, the publication date, or the full-text
            container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Title: visible heading first, then the ArticleTitle meta tag.
    title = ''.join(res.xpath('//p[@role="contentTitle"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        raise Exception(f'pingxiang article title not found: {provider_url}')

    # 成文日期 doubles as both the written date and the primary pub date;
    # extract it once instead of running the same XPath twice.
    written_date = ''.join(res.xpath('//table//span[contains(text(),"成文日期:")]/parent::td[1]/text()').extract()).strip()
    pub_date = clean_pubdate(written_date)
    if not pub_date:
        # Some page templates only carry a 发布日期 header instead.
        pub_date_info = ''.join(res.xpath('//div[@class="zw_title"]/span[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        raise Exception(f'pingxiang article pub_date not found: {provider_url}')
    pub_year = pub_date[:4]

    pub_no = ''.join(res.xpath('//table//span[contains(text(),"文") and contains(text(),"号:")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table//span[contains(text(),"索") and contains(text(),"号:")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table//span[contains(text(),"发文机关:")]/parent::td[1]/text()').extract()).strip()
    # The site abbreviates the organ to e.g. "市人民政府"; qualify it with the city.
    if organ.startswith('市'):
        organ = '萍乡' + organ
    fulltext_xpath = '//div[@class="wzzw"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'pingxiang article fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99348'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PINGXIANG"
    zt_provider = "pingxianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachment info (files linked inside the full text) back on
    # the source row so the downloader stage can pick it up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result

