import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64
import requests

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

__all__ = [
    "policy_tjbhlist_callback",
    "policy_tjbharticle_callback",
    "policy_tjbharticle_etl_callback",
    "policy_tjhplist_callback",
    "policy_tjhparticle_callback",
    "policy_tjhparticle_etl_callback",
    "policy_tjhdlist_callback",
    "policy_tjhdarticle_callback",
    "policy_tjhdarticle_etl_callback",
    "policy_tjhxlist_callback",
    "policy_tjhxarticle_callback",
    "policy_tjhxarticle_etl_callback",
    "policy_tjnklist_callback",
    "policy_tjnkarticle_callback",
    "policy_tjnkarticle_etl_callback",
    "policy_tjhblist_callback",
    "policy_tjhbarticle_callback",
    "policy_tjhbarticle_etl_callback",
    "policy_tjhqlist_callback",
    "policy_tjhqarticle_callback",
    "policy_tjhqarticle_etl_callback",
    "policy_tjdllist_callback",
    "policy_tjdlarticle_callback",
    "policy_tjdlarticle_etl_callback",
    "policy_tjxqlist_callback",
    "policy_tjxqarticle_callback",
    "policy_tjxqarticle_etl_callback",
    "policy_tjjnlist_callback",
    "policy_tjjnarticle_callback",
    "policy_tjjnarticle_etl_callback",
    "policy_tjbclist_callback",
    "policy_tjbcarticle_callback",
    "policy_tjbcarticle_etl_callback",
    "policy_tjwqlist_callback",
    "policy_tjwqarticle_callback",
    "policy_tjwqarticle_etl_callback",
    "policy_tjbdlist_callback",
    "policy_tjbdarticle_callback",
    "policy_tjbdarticle_etl_callback",
    "policy_tjjhlist_callback",
    "policy_tjjharticle_callback",
    "policy_tjjharticle_etl_callback",
    "policy_tjnhlist_callback",
    "policy_tjnharticle_callback",
    "policy_tjnharticle_etl_callback",
    "policy_tjjzlist_callback",
    "policy_tjjzarticle_callback",
    "policy_tjjzarticle_etl_callback",
    "policy_fzggwjiangsulist_callback",
    "policy_fzggwjiangsuarticle_callback",
    "policy_fzggwjiangsuarticle_etl_callback",
    "policy_jseicjiangsulist_callback",
    "policy_jseicjiangsuarticle_callback",
    "policy_jseicjiangsuarticle_etl_callback",
    "policy_stdjiangsulist_callback",
    "policy_stdjiangsuarticle_callback",
    "policy_stdjiangsuarticle_etl_callback",
    "policy_jytjiangsulist_callback",
    "policy_jytjiangsuarticle_callback",
    "policy_jytjiangsuarticle_etl_callback",
    "policy_mztjiangsulist_callback",
    "policy_mztjiangsuarticle_callback",
    "policy_mztjiangsuarticle_etl_callback",
    "policy_cztjiangsulist_callback",
    "policy_cztjiangsuarticle_callback",
    "policy_cztjiangsuarticle_etl_callback",
    "policy_jshrssjiangsulist_callback",
    "policy_jshrssjiangsuarticle_callback",
    "policy_jshrssjiangsuarticle_etl_callback",
    "policy_nynctjiangsulist_callback",
    "policy_nynctjiangsuarticle_callback",
    "policy_nynctjiangsuarticle_etl_callback",
    "policy_jsszfhcxjstjiangsulist_callback",
    "policy_jsszfhcxjstjiangsulist1_callback",
    "policy_jsszfhcxjstjiangsuarticle_callback",
    "policy_jsszfhcxjstjiangsuarticle_etl_callback",
    "policy_wjwjiangsulist_callback",
    "policy_wjwjiangsuarticle_callback",
    "policy_wjwjiangsuarticle_etl_callback",
    "policy_nanjinglist_callback",
    "policy_nanjinglist1_callback",
    "policy_nanjingarticle_callback",
    "policy_nanjingarticle_etl_callback",
    "policy_wuxilist_callback",
    "policy_wuxiarticle_callback",
    "policy_wuxiarticle_etl_callback",
    "policy_xzlist_callback",
    "policy_xzlist1_callback",
    "policy_xzarticle_callback",
    "policy_xzarticle_etl_callback",
    "policy_changzhoulist_callback",
    "policy_changzhouarticle_callback",
    "policy_changzhouarticle_etl_callback",
    "policy_suzhoulist_callback",
    "policy_suzhoulist1_callback",
    "policy_suzhouarticle_callback",
    "policy_suzhouarticle_etl_callback",
    "policy_nantonglist_callback",
    "policy_nantonglist1_callback",
    "policy_nantongarticle_callback",
    "policy_nantongarticle_etl_callback",
    "policy_lyglist_callback",
    "policy_lyglist1_callback",
    "policy_lyglist2_callback",
    "policy_lygarticle_callback",
    "policy_lygarticle_etl_callback",
    "policy_huaianlist_callback",
    "policy_huaianlist1_callback",
    "policy_huaianlist2_callback",
    "policy_huaianarticle_callback",
    "policy_huaianarticle_etl_callback",
    "policy_yanchenglist_callback",
    "policy_yanchenglist1_callback",
    "policy_yanchengarticle_callback",
    "policy_yanchengarticle_etl_callback",
    "policy_yangzhoulist_callback",
    "policy_yangzhoulist1_callback",
    "policy_yangzhouarticle_callback",
    "policy_yangzhouarticle_etl_callback",
    "policy_zhenjianglist_callback",
    "policy_zhenjiangarticle_callback",
    "policy_zhenjiangarticle_etl_callback",
    "policy_taizhoulist_callback",
    "policy_taizhoulist1_callback",
    "policy_taizhouarticle_callback",
    "policy_taizhouarticle_etl_callback",
    "policy_suqianlist_callback",
    "policy_suqianarticle_callback",
    "policy_suqianarticle_etl_callback",
    "policy_fgwshlist_callback",
    "policy_fgwsharticle_callback",
    "policy_fgwsharticle_etl_callback",
    "policy_sheitcshlist_callback",
    "policy_sheitcsharticle_callback",
    "policy_sheitcsharticle_etl_callback",
    "policy_stcsmshlist_callback",
    "policy_stcsmsharticle_callback",
    "policy_stcsmsharticle_etl_callback",
    "policy_edushlist_callback",
    "policy_edusharticle_callback",
    "policy_edusharticle_etl_callback",
    "policy_mzjshlist_callback",
    "policy_mzjsharticle_callback",
    "policy_mzjsharticle_etl_callback",
    "policy_czjshlist_callback",
    "policy_czjsharticle_callback",
    "policy_czjsharticle_etl_callback",
    "policy_rsjshlist_callback",
    "policy_rsjsharticle_callback",
    "policy_rsjsharticle_etl_callback",
    "policy_nyncwshlist_callback",
    "policy_nyncwsharticle_callback",
    "policy_nyncwsharticle_etl_callback",
    "policy_zjwshlist_callback",
    "policy_zjwsharticle_callback",
    "policy_zjwsharticle_etl_callback",
    "policy_wsjkwshlist_callback",
    "policy_wsjkwsharticle_callback",
    "policy_wsjkwsharticle_etl_callback",
    "policy_xuhuilist_callback",
    "policy_xuhuiarticle_callback",
    "policy_xuhuiarticle_etl_callback",
    "policy_shcnlist_callback",
    "policy_shcnlist1_callback",
    "policy_shcnarticle_callback",
    "policy_shcnarticle_etl_callback",
    "policy_jinganlist_callback",
    "policy_jinganlist1_callback",
    "policy_jinganlist2_callback",
    "policy_jinganarticle_callback",
    "policy_jinganarticle_etl_callback",
    "policy_shptlist_callback",
    "policy_shptarticle_callback",
    "policy_shptarticle_etl_callback",
    "policy_shyplist_callback",
    "policy_shyparticle_callback",
    "policy_shyparticle_etl_callback",
    "policy_shmhlist_callback",
    "policy_shmharticle_callback",
    "policy_shmharticle_etl_callback",
    "policy_shbsqlist_callback",
    "policy_shbsqarticle_callback",
    "policy_shbsqarticle_etl_callback",
    "policy_jiadinglist_callback",
    "policy_jiadingarticle_callback",
    "policy_jiadingarticle_etl_callback",
    "policy_jinshanlist_callback",
    "policy_jinshanarticle_callback",
    "policy_jinshanarticle_etl_callback",
    "policy_songjianglist_callback",
    "policy_songjianglist1_callback",
    "policy_songjiangarticle_callback",
    "policy_songjiangarticle_etl_callback",
    "policy_shqplist_callback",
    "policy_shqparticle_callback",
    "policy_shqparticle_etl_callback",
    "policy_fengxianlist_callback",
    "policy_fengxianlist1_callback",
    "policy_fengxianlist2_callback",
    "policy_fengxianarticle_callback",
    "policy_fengxianarticle1_callback",
    "policy_fengxianarticle_etl_callback",
    "policy_fengxianarticle1_etl_callback",
    "policy_shcmlist_callback",
    "policy_shcmarticle_callback",
    "policy_shcmarticle_etl_callback",
    "policy_fgwshandonglist_callback",
    "policy_fgwshandonglist1_callback",
    "policy_fgwshandongarticle_callback",
    "policy_fgwshandongarticle_etl_callback",
    "policy_gxtshandonglist_callback",
    "policy_gxtshandonglist1_callback",
    "policy_gxtshandongarticle_callback",
    "policy_gxtshandongarticle_etl_callback",
    "policy_kjtshandonglist_callback",
    "policy_kjtshandonglist1_callback",
    "policy_kjtshandongarticle_callback",
    "policy_kjtshandongarticle_etl_callback",
    "policy_edushandonglist_callback",
    "policy_edushandonglist1_callback",
    "policy_edushandongarticle_callback",
    "policy_edushandongarticle_etl_callback",
    "policy_mztshandonglist_callback",
    "policy_mztshandonglist1_callback",
    "policy_mztshandongarticle_callback",
    "policy_mztshandongarticle_etl_callback",
    "policy_cztshandonglist_callback",
    "policy_cztshandonglist1_callback",
    "policy_cztshandonglist2_callback",
    "policy_cztshandongarticle_callback",
    "policy_cztshandongarticle_etl_callback",
    "policy_hrssshandonglist_callback",
    "policy_hrssshandonglist1_callback",
    "policy_hrssshandongarticle_callback",
    "policy_hrssshandongarticle_etl_callback",
    "policy_nyncshandonglist_callback",
    "policy_nyncshandongarticle_callback",
    "policy_nyncshandongarticle_etl_callback",
    "policy_zjtshandonglist_callback",
    "policy_zjtshandongarticle_callback",
    "policy_zjtshandongarticle_etl_callback",
    "policy_wsjkwshandonglist_callback",
    "policy_wsjkwshandongarticle_callback",
    "policy_wsjkwshandongarticle_etl_callback",
    "policy_jinanlist_callback",
    "policy_jinanlist1_callback",
    "policy_jinanlist2_callback",
    "policy_jinanarticle_callback",
    "policy_jinanarticle_etl_callback",
    "policy_qingdaolist_callback",
    "policy_qingdaolist1_callback",
    "policy_qingdaoarticle_callback",
    "policy_qingdaoarticle_etl_callback",
    "policy_zibolist_callback",
    "policy_zibolist1_callback",
    "policy_ziboarticle_callback",
    "policy_ziboarticle_etl_callback",
    "policy_zaozhuanglist_callback",
    "policy_zaozhuanglist1_callback",
    "policy_zaozhuangarticle_callback",
    "policy_zaozhuangarticle_etl_callback",
    "policy_dongyinglist_callback",
    "policy_dongyinglist1_callback",
    "policy_dongyingarticle_callback",
    "policy_dongyingarticle_etl_callback",
    "policy_yantailist_callback",
    "policy_yantailist1_callback",
    "policy_yantailist2_callback",
    "policy_yantaiarticle_callback",
    "policy_yantaiarticle_etl_callback",
    "policy_weifanglist_callback",
    "policy_weifanglist1_callback",
    "policy_weifangarticle_callback",
    "policy_weifangarticle_etl_callback",
    "policy_jininglist_callback",
    "policy_jininglist1_callback",
    "policy_jiningarticle_callback",
    "policy_jiningarticle_etl_callback",
    "policy_taianlist_callback",
    "policy_taianlist1_callback",
    "policy_taianlist2_callback",
    "policy_taianarticle_callback",
    "policy_taianarticle_etl_callback",
    "policy_weihailist_callback",
    "policy_weihailist1_callback",
    "policy_weihaiarticle_callback",
    "policy_weihaiarticle_etl_callback",
    "policy_rizhaolist_callback",
    "policy_rizhaolist1_callback",
    "policy_rizhaolist2_callback",
    "policy_rizhaoarticle_callback",
    "policy_rizhaoarticle_etl_callback",
    "policy_binzhoulist_callback",
    "policy_binzhouarticle_callback",
    "policy_binzhouarticle_etl_callback",
    "policy_dezhoulist_callback",
    "policy_dezhouarticle_callback",
    "policy_dezhouarticle_etl_callback",
    "policy_liaochenglist_callback",
    "policy_liaochenglist1_callback",
    "policy_liaochengarticle_callback",
    "policy_liaochengarticle_etl_callback",
    "policy_linyilist_callback",
    "policy_linyiarticle_callback",
    "policy_linyiarticle_etl_callback",
    "policy_hezelist_callback",
    "policy_hezelist1_callback",
    "policy_hezearticle_callback",
    "policy_hezearticle_etl_callback",
    "policy_fgwfujianlist_callback",
    "policy_fgwfujianlist1_callback",
    "policy_fgwfujianarticle_callback",
    "policy_fgwfujianarticle_etl_callback",
    "policy_gxtfujianlist_callback",
    "policy_gxtfujianarticle_callback",
    "policy_gxtfujianarticle_etl_callback",
    "policy_kjtfujianlist_callback",
    "policy_kjtfujianarticle_callback",
    "policy_kjtfujianarticle_etl_callback",
    "policy_jytfujianlist_callback",
    "policy_jytfujianarticle_callback",
    "policy_jytfujianarticle_etl_callback",
    "policy_mztfujianlist_callback",
    "policy_mztfujianarticle_callback",
    "policy_mztfujianarticle_etl_callback",
    "policy_cztfujianlist_callback",
    "policy_cztfujianarticle_callback",
    "policy_cztfujianarticle_etl_callback",
    "policy_rstfujianlist_callback",
    "policy_rstfujianlist1_callback",
    "policy_rstfujianarticle_callback",
    "policy_rstfujianarticle_etl_callback",
    "policy_nynctfujianlist_callback",
    "policy_nynctfujianlist1_callback",
    "policy_nynctfujianarticle_callback",
    "policy_nynctfujianarticle_etl_callback",
    "policy_zjtfujianlist_callback",
    "policy_zjtfujianlist1_callback",
    "policy_zjtfujianarticle_callback",
    "policy_zjtfujianarticle_etl_callback",
    "policy_wjwfujianlist_callback",
    "policy_wjwfujianlist1_callback",
    "policy_wjwfujianarticle_callback",
    "policy_wjwfujianarticle_etl_callback",
    "policy_fuzhoulist_callback",
    "policy_fuzhouarticle_callback",
    "policy_fuzhouarticle_etl_callback",
    "policy_xmlist_callback",
    "policy_xmarticle_callback",
    "policy_xmarticle_etl_callback",
    "policy_zhangzhoulist_callback",
    "policy_zhangzhouarticle_callback",
    "policy_zhangzhouarticle_etl_callback",
    "policy_quanzhoulist_callback",
    "policy_quanzhouarticle_callback",
    "policy_quanzhouarticle_etl_callback",
    "policy_smlist_callback",
    "policy_smlist1_callback",
    "policy_smarticle_callback",
    "policy_smarticle_etl_callback",
    "policy_putianlist_callback",
    "policy_putianarticle_callback",
    "policy_putianarticle_etl_callback",
    "policy_nplist_callback",
    "policy_nparticle_callback",
    "policy_nparticle_etl_callback",
    "policy_longyanlist_callback",
    "policy_longyanarticle_callback",
    "policy_longyanarticle_etl_callback",
    "policy_ningdelist_callback",
    "policy_ningdelist1_callback",
    "policy_ningdearticle_callback",
    "policy_ningdearticle_etl_callback",

    "policy_shhuangpuarticle_etl_callback",
    "policy_shhkarticle_etl_callback",
    "policy_pudongarticle_etl_callback",
]


def clean_pubdate(value):
    """Normalize a free-form date string to an 8-digit YYYYMMDD string.

    Non-digit characters are stripped, the result is truncated/right-padded
    with zeros to 8 characters, and an impossible month (>12) or day (>31)
    is zeroed out rather than rejected.

    Args:
        value: raw date text (may be None or empty).

    Returns:
        An 8-character digit string, or '' when the input is falsy.
    """
    if not value:
        return ''
    # Raw string: '\D' in a plain literal is an invalid escape sequence.
    digits = re.sub(r'\D', '', value)[:8].ljust(8, '0')
    if int(digits[4:6]) > 12:
        # Month is impossible: discard both month and day.
        digits = digits[:4] + '0000'
    if int(digits[6:]) > 31:
        # Day is impossible: keep year+month, zero the day.
        digits = digits[:6] + '00'
    return digits


def cleaned(value):
    """Strip whitespace from a scraped value.

    Args:
        value: a string or a list of strings (as returned by parsel's
            ``extract()``); may be None/empty.

    Returns:
        The stripped string, or the list items stripped and joined with a
        single space; '' for any falsy input.
    """
    if not value:
        return ""
    # isinstance instead of `type(value) is list` (idiomatic type check).
    if isinstance(value, list):
        return ' '.join(item.strip() for item in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be skipped as an attachment link.

    Filters out over-long URLs, path-less URLs, mail/javascript/anchor
    links, links to search engines and social sites, and URLs whose last
    path segment looks like an HTML page rather than a downloadable file.
    """
    if len(url) > 500:
        return True
    if '/' not in url.replace('//', ''):
        return True

    # Any of these substrings anywhere in the URL disqualifies it.
    blacklist = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(token in url for token in blacklist):
        return True

    # Whole-URL suffixes that never point at a downloadable file.
    bad_url_suffixes = (
        '/', '.net', '.asp', '.shtml', '/share', '.exe', '.xml',
        'pdf}', 'jpg}',
    )
    if url.endswith(bad_url_suffixes):
        return True

    last_segment = url.split('/')[-1].lower()
    if not last_segment:
        return True

    # Last-segment suffixes that indicate an HTML page, not a file.
    page_suffixes = (
        '.htm', '.shtml', '.jhtml', '.org',
        'xhtml', '.phtml', '.cn', '.com',
        '.html', '.mht', '.html%20',
    )
    if last_segment.endswith(page_suffixes):
        return True
    if '.jsp' in last_segment and len(last_segment.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect attachment and image links inside *xpath* of a parsed page.

    Resolves every ``<a href>`` and every ``@src`` against the record's
    provider_url, drops unusable URLs (see judge_url) and duplicates, and
    returns a list of ``{'url', 'name', 'pub_year', 'keyid'}`` dicts.
    """
    base_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    seen = list()
    file_info = list()

    # Anchor links: name is the anchor's visible text.
    for anchor in res.xpath(f'{xpath}//a'):
        raw_href = anchor.xpath('@href').extract_first()
        if not raw_href or not raw_href.strip():
            continue
        raw_href = raw_href.strip()
        try:
            abs_url = parse.urljoin(base_url, raw_href)
        except:
            continue
        if judge_url(abs_url) or abs_url in seen:
            continue
        seen.append(abs_url)
        link_text = ''.join(anchor.xpath('.//text()').extract()).strip()
        file_info.append({'url': abs_url, 'name': link_text,
                          'pub_year': pub_year, 'keyid': keyid})

    # Embedded resources (images etc.): name is the raw src attribute.
    for raw_src in res.xpath(f'{xpath}//*/@src').extract():
        raw_src = raw_src.strip()
        if not raw_src:
            continue
        img_url = parse.urljoin(base_url, raw_src)
        if judge_url(img_url) or img_url in seen:
            continue
        seen.append(img_url)
        file_info.append({'url': img_url, 'name': raw_src,
                          'pub_year': pub_year, 'keyid': keyid})

    return file_info


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task-row dict before re-insertion.

    Removes DB-managed / transient fields in place so the remaining keys
    can be used to schedule new rows. Missing keys are tolerated (the
    original chained ``pop`` calls raised KeyError on any absent column).

    Args:
        sql_dict: mutable dict of a task row; mutated in place.

    Returns:
        The same dict object with the bookkeeping keys removed.
    """
    for key in ('id', 'update_time', 'create_time', 'null_dicts',
                'err_msg', 'other_dicts', 'state', 'failcount'):
        sql_dict.pop(key, None)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base metadata record shared by every policy article.

    Args:
        rawid: source-site record id.
        lngid: unique id derived from sub_db_id + rawid.
        sub_db_id: numeric sub-database id of the source site.
        down_date_str: download timestamp formatted '%Y%m%d_%H%M%S'.
        product: product code of the source site.
        zt_provider: provider tag of the source site.

    Returns:
        dict with the common fixed fields pre-filled.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        # First 8 chars of the timestamp are the download date YYYYMMDD.
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the full-text record that accompanies a policy article.

    Args:
        lngid: unique id of the article (also used as keyid and filename).
        sub_db_id: numeric sub-database id of the source site.
        down_date_str: download timestamp formatted '%Y%m%d_%H%M%S'.
        fulltext: the extracted HTML body of the article.
        pub_year: publication year string.

    Returns:
        dict ready to be saved into the full-text table.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


#   Tianjin Binhai New Area (天津市滨海新区)
def policy_tjbhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Binhai New Area (www.tjbh.gov.cn).

    On the first list page it discovers the total page count from the
    "末页" (last page) link and schedules the remaining list pages as new
    rows (befor_dicts). It then extracts every article link on the current
    page and queues one row per article for the article stage (next_dicts)
    under sub_db_id 99181.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every scheduled row; task_tag_next is the
    # tag of the follow-up article stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # The "末页" link href encodes the last page number either as
        # "_N." or as "(N)".
        page_info = res.xpath('//a[contains(text(),"末页")]/@href').extract_first()
        if page_info:
            max_count = re.findall('_(\d+)\.', page_info)
            if not max_count:
                max_count = re.findall('\((\d+)\)', page_info)
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        # Only the first page (page_index == 1 on this site) fans out the
        # remaining list pages, so pagination rows are created only once.
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paged list URLs follow the "<page_info>_<page>" pattern.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//li[contains(@class,"xxgk-list")]|//div[@class="sec-list"]/ul//li')
        for li in li_list:
            # Each queued article row carries the *next* stage's task_tag.
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/div[1]/a/@href').extract_first()
            # NOTE(review): href may be None when the <a> has no @href, in
            # which case startswith raises — presumably the site layout
            # guarantees an href here; confirm against live pages.

            if href.startswith('/'):
                base_url = f'http://www.tjbh.gov.cn'
                url = base_url + href
            else:
                base_url = f'http://www.tjbh.gov.cn/{callmodel.sql_model.list_rawid}.html'
                url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = final path segment of the URL without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99181'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/div[1]/a/@title|div/div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/div[2]/span/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjbharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Binhai.

    The article stage schedules no follow-up work here; extraction happens
    in the ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_tjbharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Binhai (sub_db_id 99181) article pages.

    Parses the downloaded article HTML, extracts the policy metadata and
    full text, and produces:
      * save_data rows for the 'policy_latest' and 'policy_fulltext_latest'
        tables, and
      * an update of the task row's other_dicts column with the JSON list
        of attachment links found inside the full-text container.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the info-table "名称" (name) cell, then the headline
    # span, then fall back to the title captured on the list page.
    title = ''.join(res.xpath('//span[contains(text(),"名") and contains(text(),"称")]/parent::div[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//span[@id="MainTitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located by character pairs from their Chinese
    # labels (e.g. "文"+"号" = document number) to tolerate label variants.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//span[contains(text(),"索") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//span[contains(text(),"主") and contains(text(),"类")]/parent::div[1]/font/text()').extract()).strip()
    written_date = ''.join(res.xpath('//span[contains(text(),"成") and contains(text(),"期")]/parent::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//span[contains(text(),"有") and contains(text(),"性")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//span[contains(text(),"发") and contains(text(),"构")]/parent::div[1]/text()').extract()).strip()
    # Qualify bare organ names like "市政府" with the city name.
    if organ.startswith('市'):
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail explicitly (with context) so the task can be retried or
        # inspected; previously this was a message-less `raise Exception`.
        raise Exception(f'fulltext container {fulltext_xpath} not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99181'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJBH"
    zt_provider = "tjbhcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment links (or an empty object) on the task row so a
    # later file-download stage can pick them up.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Tianjin Heping District (天津市和平区)
def policy_tjhplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Heping District (www.tjhp.gov.cn).

    Reads the total page count from the page's "countPage = N" script
    variable, schedules the remaining list pages on the first run
    (page_index == 0), and queues every article link on the current page
    for the article stage under sub_db_id 99182.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied onto every scheduled row; task_tag_next is the
    # tag of the follow-up article stage.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is published in a JS variable "countPage = N".
        max_count = re.findall('countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): unlike the tjbh callback this range excludes
            # total_page itself — presumably pages are 0-based on this
            # site; confirm against its pagination URLs.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paged list URLs follow the "<page_info>_<page>" pattern.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//ul[@class="list"]/li|//div[contains(@class,"zfxxgkzd")]/ul/li')
        for li in li_list:
            # Each queued article row carries the *next* stage's task_tag.
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjhp.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'tjhp' not in url:
                continue
            # rawid = final path segment of the URL without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99182'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip().replace('·', '', 1)
            article_json["pub_date"] = li.xpath('span/text()|b/text()|div[@class="xl-ove"]/span[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjhparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Heping.

    The article stage schedules no follow-up work here; extraction happens
    in the ETL callback, so an empty DealModel is returned.
    """
    return DealModel()


def policy_tjhparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Tianjin Heping (和平区) policy article pages.

    Extracts document metadata (title, document number, index number,
    subject, written date, legal status, issuing organ) and the full text
    from the downloaded HTML, emits rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and schedules an ``other_dicts`` update on
    the source row with any attachment info found in the full-text node.

    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="article  pub_border"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata labels are matched by their distinctive characters, e.g.
    # "发…号" -> 发文字号 (document number), "索…号" -> 索引号 (index number).
    pub_no = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath( '//div[@class="mc"]//span[contains(text(),"索") and contains(text(),"号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"主") and contains(text(),"题")]//text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"成") and contains(text(),"期")]/span/@data-time').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"效") and contains(text(),"性")]//text()').extract()).strip()
    legal_status = legal_status.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"构")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # District pages abbreviate "天津市…"; restore the full prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="pages_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99182'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJHP"
    zt_provider = "tjhpcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Push attachment info (if any) back onto the source row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市河东区 (Tianjin Hedong district)
def policy_tjhdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Hedong (河东区) policy documents.

    On the first page (page_index == 0) it reads the total page count out of
    the page's pagination JavaScript and fans out one list task per remaining
    page.  It then extracts every article link on the current page and queues
    an article task (rawid + url/title/pub_date JSON) for each.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in inline JS; two template variants are seen.
        max_count = re.findall(r'countPage = (\d+)', html)
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue tasks for all the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="xl-r2-list"]/li|//ul[@class="news_list news_list2"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjhd.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99183'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[@class="content"]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span[@class="time"]/text()|a/span/span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjhdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Hedong: a no-op — downloading the
    page is enough here, all parsing happens in the ETL step."""
    return DealModel()


def policy_tjhdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Tianjin Hedong (河东区) policy article pages.

    Extracts document metadata and the full text from the downloaded HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    schedules an ``other_dicts`` update on the source row with any
    attachment info found in the full-text node.

    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: info-disclosure layout first, then the generic news layouts,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="news_title1"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata labels matched by distinctive characters ("发…号" = 发文字号 etc.).
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # District pages abbreviate "天津市…"; restore the full prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[contains(@class,"page_info")]|//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99183'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJHD"
    zt_provider = "tjhdcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Push attachment info (if any) back onto the source row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市河西区 (Tianjin Hexi district)
def policy_tjhxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Hexi (河西区) policy documents.

    On the first page (page_index == 0) it reads the total page count out of
    the page's pagination JavaScript and fans out one list task per remaining
    page.  It then extracts every article link on the current page and queues
    an article task (rawid + url/title/pub_date JSON) for each.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in the page's inline pagination JS.
        max_count = re.findall(r'countPage = (\d+)', html)
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue tasks for all the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@id="xxgk_right"]//ul/li|//ul[@class="inTyList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjhx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99184'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|.//div[@class="xl-ove"]/span[@class="xl-r2li-s3"]/text()|b/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjhxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Hexi: a no-op — downloading the
    page is enough here, all parsing happens in the ETL step."""
    return DealModel()


def policy_tjhxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Tianjin Hexi (河西区) policy article pages.

    Extracts document metadata and the full text from the downloaded HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    schedules an ``other_dicts`` update on the source row with any
    attachment info found in the full-text node.

    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@id="content_zcwj"]/h1//text()|//div[@class="tyContent"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata sits in a label/value table: value is the <td> following the label.
    pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期：")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # District pages abbreviate "天津市…"; restore the full prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="contentCons"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99184'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJHX"
    zt_provider = "tjhxcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Push attachment info (if any) back onto the source row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市南开区 (Tianjin Nankai district)
def policy_tjnklist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Nankai (南开区) policy documents.

    On the first page (page_index == 0) it reads the total page count out of
    the page's pagination JavaScript and fans out one list task per remaining
    page.  It then extracts every article link on the current page and queues
    an article task (rawid + url/title/pub_date JSON) for each; entries with
    no publication date are skipped.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in the page's inline pagination JS.
        max_count = re.findall(r'countPage = (\d+)', html)
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue tasks for all the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//div[@class="hd-right"]//ul/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjnk.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99185'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[@class="word"]/text()|a/text()').extract_first().strip()
            pub_date_raw = li.xpath('a/span[@class="date"]/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first()
            if not pub_date_raw:
                continue
            article_json["pub_date"] = pub_date_raw.replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjnkarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Nankai: a no-op — downloading the
    page is enough here, all parsing happens in the ETL step."""
    return DealModel()


def policy_tjnkarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Tianjin Nankai (南开区) policy article pages.

    Extracts document metadata and the full text from the downloaded HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    schedules an ``other_dicts`` update on the source row with any
    attachment info found in the full-text node.  The publication date is
    taken from the page itself (three layout variants), not from the list.

    :raises Exception: when no publication date or no full-text container
        can be located on the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)
    # Title: info-disclosure layout first, then the generic news layouts,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()|//div[@class="big-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publication date: try each layout variant in turn, then an inline JS value.
    pub_date_info = ''.join(res.xpath('//div[@class="news-details"]//span[contains(text(),"时间：")]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布时间：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        pub_date_info = re.findall("fbrqValue = '(.*?)'", html)
        pub_date = clean_pubdate(pub_date_info[0]) if pub_date_info else ''
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_year = pub_date[:4]
    # Metadata labels matched by distinctive characters ("发…号" = 发文字号 etc.).
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # District pages abbreviate "天津市…"; restore the full prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]|//div[@id="detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99185'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJNK"
    zt_provider = "tjnkcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Push attachment info (if any) back onto the source row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市河北区 (Tianjin Hebei district)
def policy_tjhblist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Hebei (河北区) policy documents.

    On the first page (page_index == 0) it reads the total page count out of
    the page's pagination JavaScript and fans out one list task per remaining
    page.  It then extracts every article link on the current page and queues
    an article task (rawid + url/title/pub_date JSON) for each.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        # Total page count lives in inline JS; two template variants are seen.
        max_count = re.findall(r'countPage = (\d+)', html)
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", html)
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue tasks for all the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        li_list = res.xpath('//ul[@class="first-list"]/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjhb.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99186'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|.//div[@class="xl-ove"]/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjhbarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Hebei: a no-op — downloading the
    page is enough here, all parsing happens in the ETL step."""
    return DealModel()


def policy_tjhbarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Tianjin Hebei (河北区) policy article pages.

    Extracts document metadata and the full text from the downloaded HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    schedules an ``other_dicts`` update on the source row with any
    attachment info found in the full-text node.

    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: info-disclosure layout first, then the generic news layouts,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()|//div[@class="big-title"]//text()|//h2[@class="gener-title f24 mf32"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata labels matched by distinctive characters ("发…号" = 发文字号 etc.).
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # District pages abbreviate "天津市…"; restore the full prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]|//div[@id="detail"]|//div[@class="gener-text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99186'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJHB"
    zt_provider = "tjhbcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Push attachment info (if any) back onto the source row as other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市红桥区
def policy_tjhqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page stage for Tianjin Hongqiao district (www.tjhq.gov.cn).

    On the first page (page_index == 0) it schedules list tasks for the
    remaining pages; on every page it extracts article links and queues them
    for the article stage under sub_db_id 99187.

    Args:
        callmodel: platform callback model carrying the fetched HTML in
            para_dicts["data"]["1_1"]["html"] and the task row in sql_model.

    Returns:
        DealModel with follow-up list tasks in befor_dicts and article tasks
        in next_dicts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is embedded in an inline JS parseInt call; raw string
        # avoids invalid-escape warnings for \( and \d.
        max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out tasks for pages 1..total_page-1 (the base
            # index.html is page 0, so suffixes stop at total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts exist on this site; both selectors are tried at once.
        li_list = res.xpath('//div[@class="list_content"]/a|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|@href').extract_first()
            base_url = f'http://www.tjhq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue
            # rawid = detail page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99187'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[contains(@class,"list_content_time")]/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjhqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for Tianjin Hongqiao: no post-processing needed."""
    return DealModel()


def policy_tjhqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for Tianjin Hongqiao articles: extract metadata and fulltext.

    Builds rows for the `policy_latest` and `policy_fulltext_latest` tables
    and writes attachment info back onto the task row via `other_dicts`.

    Raises:
        Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Label cells are matched by two characters so variants with embedded
    # whitespace (e.g. "名  称") still match; the value is the next div.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="service_title service_title2"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix municipal organ names with the city for disambiguation.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]|//div[@id="detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare `raise Exception`.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99187'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJHQ"
    zt_provider = "tjhqcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市东丽区
def policy_tjdllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page stage for Tianjin Dongli district.

    Two list sources exist: static 'gongkai' HTML pages (paged via
    index_N.html) and a JSON search API (paged via pageNumber=N). On the
    first page the remaining list pages are scheduled; every page's article
    links are queued for the article stage under sub_db_id 99188.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if 'gongkai' in callmodel.sql_model.list_rawid:
            # raw string avoids an invalid-escape warning for \d
            max_count = re.findall(r'countPage = (\d+)', para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1
        else:
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            total_page = html_json['page']['totalPages']

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'gongkai' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}.html"}
                else:
                    # The JSON API pages are 1-based, hence page + 1.
                    dic = {"page_info": f"pageNumber={page+1}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'gongkai' in list_rawid:
            li_list = res.xpath('//div[@id="xxgk_right"]//ul/li|//ul[@class="inTyList"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.tjdl.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99188'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['page']['content']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li['DOCPUBURL']
                # NOTE(review): base host is tjhx.gov.cn, not tjdl.gov.cn —
                # looks copy-pasted from the Hexi callback. Harmless if
                # DOCPUBURL is always absolute; confirm against the live API.
                base_url = f'http://www.tjhx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99188'
                article_json["url"] = url
                article_json["title"] = li['BT']
                article_json["pub_date"] = li['FBRQ'][:10]
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjdlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for Tianjin Dongli: no post-processing needed."""
    return DealModel()


def policy_tjdlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for Tianjin Dongli articles: extract metadata and fulltext.

    Builds rows for the `policy_latest` and `policy_fulltext_latest` tables
    and writes attachment info back onto the task row via `other_dicts`.

    Raises:
        Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="tyContent"]/h1//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    # Label cells are matched by two characters so variants with embedded
    # whitespace still match; the value sits in the following td.
    pub_no = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"主") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"有") and contains(text(),"性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    organ1 = ''.join(res.xpath('//div[@class="zcnrField"]//td[contains(text(),"联") and contains(text(),"位")]/following::td[1]/text()').extract()).strip()
    # Join issuing organ and joint-issuing organ when both are present.
    organ = f"{organ};{organ1}" if organ and organ1 else organ
    if organ.startswith('市'):
        # Prefix municipal organ names with the city for disambiguation.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="contentCons"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare `raise Exception`.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99188'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJDL"
    zt_provider = "tjdlcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市西青区
def policy_tjxqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page stage for Tianjin Xiqing district (www.tjxq.gov.cn).

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article stage under
    sub_db_id 99189.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string avoids an invalid-escape warning for \d
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts exist on this site; both selectors are tried at once.
        li_list = res.xpath('//div[@class="common_list"]/ul/li|//div[@id="xxgk_right"]//ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjxq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue
            # rawid = detail page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99189'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjxqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for Tianjin Xiqing: no post-processing needed."""
    return DealModel()


def policy_tjxqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for Tianjin Xiqing articles: extract metadata and fulltext.

    Builds rows for the `policy_latest` and `policy_fulltext_latest` tables
    and writes attachment info back onto the task row via `other_dicts`.

    Raises:
        Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="title"]//h3/text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    # Metadata spans hold "label：value" text; the value is taken after the
    # last full-width colon.
    pub_no = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"索") and contains(text(),"号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"主") and contains(text(),"题")]//text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"成") and contains(text(),"期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"构")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # Prefix municipal organ names with the city for disambiguation.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="nrqy"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare `raise Exception`.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99189'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJXQ"
    zt_provider = "tjxqcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市津南区
def policy_tjjnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page stage for Tianjin Jinnan district (www.tjjn.gov.cn).

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article stage under
    sub_db_id 99190.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw strings avoid invalid-escape warnings for \d and \(
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            # Fallback for templates that embed the count in a JS parseInt call.
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts exist on this site; both selectors are tried at once.
        li_list = res.xpath('//div[@class="common-listr"]/ul/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjjn.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue
            # rawid = detail page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99190'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/p/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|a/p/span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjjnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-download stage for Tianjin Jinnan: no post-processing needed."""
    return DealModel()


def policy_tjjnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for Tianjin Jinnan articles: extract metadata and fulltext.

    Builds rows for the `policy_latest` and `policy_fulltext_latest` tables
    and writes attachment info back onto the task row via `other_dicts`.

    Raises:
        Exception: when no fulltext container matches the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Label cells are matched by two characters so variants with embedded
    # whitespace (e.g. "名  称") still match; the value is the next div.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//p[@class="ty-content-main-title"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured from the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix municipal organ names with the city for disambiguation.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@id="xlrllt"]|//div[@id="detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare `raise Exception`.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99190'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJJN"
    zt_provider = "tjjncngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市北辰区
def policy_tjbclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page stage for Tianjin Beichen district (www.tjbc.gov.cn).

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article stage under
    sub_db_id 99191.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string avoids an invalid-escape warning for \d
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: fan out tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts exist on this site; both selectors are tried at once.
        li_list = res.xpath('//div[@id="xxgk_right"]//ul/li|//ul[@class="inTyList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjbc.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue
            # rawid = detail page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99191'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjbcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for tjbc.gov.cn.

    No follow-up tasks are scheduled at this stage, so an empty DealModel
    is returned; all extraction happens in the ETL callback.
    """
    return DealModel()


def policy_tjbcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for tjbc.gov.cn (Tianjin) policy article pages.

    Extracts metadata (title, document number, index number, subject, organ,
    written date) and the full text from the fetched HTML, builds the rows
    for the ``policy_latest`` / ``policy_fulltext_latest`` tables, and writes
    any attachment info back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="title"]/h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata spans are "label：value" pairs; keep only the value after '：'.
    pub_no = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"索") and contains(text(),"号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"主") and contains(text(),"题")]//text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"成") and contains(text(),"期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="mc"]//span[contains(text(),"发") and contains(text(),"构")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the municipality prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@class="nrqy"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Layout changed or the fetch was incomplete: fail with context so
        # the task can be retried/inspected instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99191'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJBC"
    zt_provider = "tjbccngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container go back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市武清区
def policy_tjwqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Wuqing district (tjwq.gov.cn).

    Reads the total page count from inline JS on the first page, schedules
    the remaining list pages (only when this is page 0), and extracts each
    article's url/title/pub_date for the next crawl stage. The ``zwgk/zcwj``
    column embeds its entries as JS ``tempNode`` objects instead of HTML
    ``<li>`` nodes, so it is parsed with regexes.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Two known inline-JS variants carry the page count.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        if 'zwgk/zcwj' in callmodel.sql_model.list_rawid:
            # This JS-rendered column is handled as a single page.
            max_count = 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue all remaining list pages in one batch.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'zwgk/zcwj' in list_rawid:
            # Entries embedded as JS objects: pull url/title/date by regex.
            li_list = re.findall(r'tempNode = \{.*?\};', para_dicts["data"]["1_1"]['html'], re.S)
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = re.findall(r'"url":"(.*?)"', li)[0]
                base_url = f'https://www.tjwq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99192'
                article_json["url"] = url
                article_json["title"] = re.findall(r'"title":"(.*?)"', li)[0].strip()
                article_json["pub_date"] = re.findall(r'"date":"(.*?)"', li)[0].strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="news-list mf26"]//ul/li|//ul[@class="xl-r2-list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'https://www.tjwq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # Skip non-article links and bare directory urls.
                if 'htm' not in url or url.endswith('/'):
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99192'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('a/span/text()|span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：', '').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjwqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for tjwq.gov.cn.

    Nothing to schedule here; extraction is done in the ETL callback, so an
    empty DealModel is returned.
    """
    return DealModel()


def policy_tjwqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Wuqing district (tjwq.gov.cn) article pages.

    Extracts metadata and full text from the fetched HTML, builds the rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and records any
    attachment info back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: structured metadata table first, then the page heading, then
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="gener-title f24 mf32"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the municipality prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="xlrllt"]|//div[@id="zoom"]|//div[@class="gener-text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99192'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJWQ"
    zt_provider = "tjwqcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    # NOTE(review): pub_date was already cleaned above; re-clean assumed idempotent.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container go back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市宝坻区
def policy_tjbdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall("countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall("countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//div[@class="news-list"]//ul/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjbd.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            # base_url = f'http://www.bjchp.gov.cn'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            # rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99193'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/div[@class="list3"]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/div[@class="list4"]/text()|.//div[@class="xl-ove"]/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjbdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for tjbd.gov.cn.

    Nothing to schedule here; extraction is done in the ETL callback, so an
    empty DealModel is returned.
    """
    return DealModel()


def policy_tjbdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Baodi district (tjbd.gov.cn) article pages.

    Extracts metadata and full text from the fetched HTML, builds the rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and records any
    attachment info back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: structured metadata table first, then the page heading, then
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//p[@class="ty-content-main-title"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the municipality prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="xlrllt"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99193'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJBD"
    zt_provider = "tjbdcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container go back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市静海区
def policy_tjjhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall("countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall("countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//ul[@class="r-list"]/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjjh.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            # base_url = f'http://www.bjchp.gov.cn'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            # rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99194'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|.//div[@class="xl-ove"]/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjjharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for tjjh.gov.cn.

    Nothing to schedule here; extraction is done in the ETL callback, so an
    empty DealModel is returned.
    """
    return DealModel()


def policy_tjjharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Jinghai district (tjjh.gov.cn) article pages.

    Extracts metadata and full text from the fetched HTML, builds the rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and records any
    attachment info back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: structured metadata table first, then the page heading, then
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="qt-title"]//text()|//div[@class="big-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    # BUG FIX: these two were swapped. "成...期" (成文日期) is the written
    # date and "有...性" (有效性) is the legal status, matching the sibling
    # district callbacks in this module.
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the municipality prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="xlrllt"]|//div[@id="zoom"]|//div[@id="details"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99194'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJJH"
    zt_provider = "tjjhcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear both in a dedicated block and inside the fulltext.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="qt-attachments"])')
    file_info = file_info2 + file_info1
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市宁河区
def policy_tjnhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall("countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall("countPage: parseInt\('(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index+1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # if 'zfxxgk1' in list_rawid:
        li_list = res.xpath('//div[@class="news-list mf26"]/ul/li|//ul[@class="xl-r2-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.tjnh.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            # base_url = f'http://www.bjchp.gov.cn'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            # rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99195'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|a/span/text()|div/span[@class="xl-r2li-s3"]/text()').extract_first().replace('发文日期：','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjnharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for tjnh.gov.cn.

    Nothing to schedule here; extraction is done in the ETL callback, so an
    empty DealModel is returned.
    """
    return DealModel()


def policy_tjnharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Ninghe district (tjnh.gov.cn) article pages.

    Extracts metadata and full text from the fetched HTML, builds the rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and records any
    attachment info back onto the source row's ``other_dicts`` column.

    Raises:
        Exception: when the fulltext container cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: structured metadata table first, then the page heading, then
    # the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//h2[@class="gener-title f24 mf32"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    # BUG FIX: these two were swapped. "成...期" (成文日期) is the written
    # date and "有...性" (有效性) is the legal status, matching the sibling
    # district callbacks in this module.
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the municipality prefix.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="xlrllt"]|//div[@class="gener-text"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99195'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJNH"
    zt_provider = "tjnhcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear both in a dedicated block and inside the fulltext.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="qt-attachments qt-has"])')
    file_info = file_info2 + file_info1
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  天津市蓟州区
def policy_tjjzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Tianjin Jizhou district (www.tjjz.gov.cn).

    Reads the list HTML from para_dicts["data"]["1_1"]; on the first run
    (page_index == 0) it schedules the remaining list pages, then emits one
    article task per list item found on the current page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count embedded in the page's JS ("countPage = N").
        # Raw string avoids the invalid "\d" escape-sequence warning.
        max_count = re.findall(r"countPage = (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out pages 1 .. total_page-1 (page 0 is the
            # one being parsed right now).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news_list news_list4"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.tjjz.gov.cn/{callmodel.sql_model.list_rawid}.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Only article pages (*.htm / *.html) are scheduled.
                continue
            # rawid = file name of the article URL without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99196'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_tjjzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tianjin Jizhou; parsing is deferred to the ETL step."""
    return DealModel()


def policy_tjjzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Tianjin Jizhou district (天津市蓟州区) article pages.

    Extracts metadata from the article HTML (falling back to values captured
    at list time) and stages one row each for policy_latest and
    policy_fulltext_latest; attachment info found on the page is written
    back to the task row's other_dicts column.

    Raises:
        Exception: when none of the known full-text containers matched.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the info table row ("名称"), then the page headline,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"名") and contains(text(),"称")]/following::div[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="news_title"]//text()|//div[@class="qt-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"主") and contains(text(),"题")]/following::div[1]//text()').extract()).strip()
    # BUGFIX: the written-date ("成文日期") and legal-status ("有效性") xpaths
    # were swapped between the two variables; they now match the right labels.
    written_date = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"成") and contains(text(),"期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"有") and contains(text(),"性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-zw-top"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal organs ("市...") with the municipality name.
        organ = '天津' + organ

    fulltext_xpath = '//div[@id="xlrllt"]|//div[contains(@class,"page_info")]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext container matched: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99196'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TJJZ"
    zt_provider = "tjjzcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from both the body and the attachment list.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="qt-attachments qt-has"])')
    file_info = file_info2 + file_info1
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省发展和改革委员会
def policy_fzggwjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Development and Reform Commission.

    The endpoint returns XML-like <record> entries plus a <totalrecord>
    count.  On the first page (page_index == 1) it fans out the remaining
    list pages (25 records per page, 3 pages per scheduled task), then
    emits one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape-sequence warning.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each scheduled task covers a 3-page window; start/end are
            # 1-based record indexes clamped to max_count.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//a/@href').extract_first()
            base_url = 'http://fzggw.jiangsu.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name of the article URL without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99197'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a/text()').extract_first().strip()
            if '284' == callmodel.sql_model.list_rawid:
                # List channel 284 carries the date as bare text on the record
                # node; keep digits only.
                pub_date = ''.join(li.xpath('text()').extract())
                pub_date = re.sub(r'\D', '', pub_date).strip()
            else:
                pub_date = li.xpath('span/text()').extract_first().strip()
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggwjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangsu DRC; parsing is deferred to the ETL step."""
    return DealModel()


def policy_fzggwjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangsu Development and Reform Commission articles.

    Extracts metadata from the article HTML (falling back to list-time
    values) and stages rows for policy_latest / policy_fulltext_latest;
    attachment info is written back to the task row's other_dicts column.

    Raises:
        Exception: when none of the known full-text containers matched.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: prefer the info table row ("名  称:"), then the page headline,
    # finally the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"名  称:")]/following::strong[1]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="det_tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"文  号:")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"索引号:")]/parent::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"分  类:")]/parent::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"主题词:")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"发文日期:")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"发布机构:")]/parent::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify provincial organs ("省...") with the province name.
        organ = '江苏' + organ

    fulltext_xpath = '//div[@id="article"]|//div[@class="det_content"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext container matched: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99197'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FZGGWJIANGSU"
    zt_provider = "fzggwjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found in the body are written back to other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省工业和信息化厅
def policy_jseicjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Department of Industry and IT.

    The endpoint returns XML-like <record> entries plus a <totalrecord>
    count.  On the first page (page_index == 1) it fans out the remaining
    list pages (25 records per page, 3 pages per scheduled task), then
    emits one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape-sequence warning.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count/25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each scheduled task covers a 3-page window; start/end are
            # 1-based record indexes clamped to max_count.
            for page in range(1, total_page+1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1)*25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://gxt.jiangsu.gov.cn'
            # Records may carry absolute or site-relative links.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name of the article URL without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99198'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # Date can sit in <b> or as bare text; keep digits only.
            pub_date = ''.join(li.xpath('b/text()|text()').extract())
            pub_date = re.sub(r'\D', '', pub_date).strip()
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jseicjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangsu DIIT; parsing is deferred to the ETL step."""
    return DealModel()


def policy_jseicjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangsu Department of Industry and IT articles.

    Extracts metadata from the article HTML (falling back to list-time
    values) and stages rows for policy_latest / policy_fulltext_latest;
    attachment info is written back to the task row's other_dicts column.

    Raises:
        Exception: when none of the known full-text containers matched.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: try the known headline containers, else fall back to the
    # title captured on the list page.
    title = ''.join(res.xpath('//span[@id="lTitle"]/text()|//div[@class="news_title titlep"]//text()|//div[@class="nstit"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//b[contains(text(),"文") and contains(text(),"号")]/parent::td[1]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//b[contains(text(),"索") and contains(text(),"号")]/parent::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//b[contains(text(),"主") and contains(text(),"类")]/parent::td[1]/span/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//b[contains(text(),"主") and contains(text(),"词")]/parent::td[1]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//b[contains(text(),"发文日期")]/parent::td[1]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//b[contains(text(),"时") and contains(text(),"效")]/parent::td[1]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//b[contains(text(),"发布机构")]/parent::td[1]/span/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify provincial organs ("省...") with the province name.
        organ = '江苏' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="page_info"]|//div[@id="con1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext container matched: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99198'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSEICJIANGSU"
    zt_provider = "jseicjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found in the body are written back to other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省科学技术厅
def policy_stdjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Department of Science and Technology.

    The endpoint returns XML-like <record> entries plus a <totalrecord>
    count.  On the first page (page_index == 1) it fans out the remaining
    list pages (25 records per page, 3 pages per scheduled task), then
    emits one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape-sequence warning.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count/25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each scheduled task covers a 3-page window; start/end are
            # 1-based record indexes clamped to max_count.
            for page in range(1, total_page+1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1)*25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://kxjst.jiangsu.gov.cn/module/web/jpage/dataproxy.jsp'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name of the article URL without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99199'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_stdjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangsu DST; parsing is deferred to the ETL step."""
    return DealModel()


def policy_stdjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangsu Department of Science and Technology articles.

    Extracts metadata from the article HTML (falling back to list-time
    values) and stages rows for policy_latest / policy_fulltext_latest;
    attachment info is written back to the task row's other_dicts column.

    Raises:
        Exception: when the full-text container did not match.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Title: page headline first, else the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"期")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"时") and contains(text(),"效")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify provincial organs ("省...") with the province name.
        organ = '江苏' + organ

    fulltext_xpath = '//div[@class="main-txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"no fulltext container matched: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99199'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "STDJIANGSU"
    zt_provider = "stdjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found in the body are written back to other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省教育厅
def policy_jytjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Department of Education.

    The endpoint returns XML-like <record> entries plus a <totalrecord>
    count.  On the first page (page_index == 1) it fans out the remaining
    list pages (25 records per page, 3 pages per scheduled task), then
    emits one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape-sequence warning.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count/25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each scheduled task covers a 3-page window; start/end are
            # 1-based record indexes clamped to max_count.
            for page in range(1, total_page+1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1)*25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://jyt.jiangsu.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name of the article URL without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99200'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangsu DoE; parsing is deferred to the ETL step."""
    return DealModel()


def policy_jytjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 江苏省教育厅 (jyt.jiangsu.gov.cn) article pages.

    Parses title and full text from the detail-page HTML, queues rows for
    ``policy_latest`` and ``policy_fulltext_latest``, and writes attachment
    info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="sp_title"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99200'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTJIANGSU"
    zt_provider = "jytjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment info (if any) is persisted on the task row as JSON
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省民政厅
def policy_mztjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 江苏省民政厅 (mzt.jiangsu.gov.cn).

    On page 1 it fans out the remaining paging tasks in batches of 3 pages
    (25 records per page); for every ``<record>`` entry it emits a
    next-stage article task carrying url/title/pub_date as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total record count from the payload; default to 1 when the marker is missing
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: schedule the paging tasks, 3 pages per task
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant: total page count
            for page in range(1, total_page + 1, 3):
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://mzt.jiangsu.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            # the article task runs under the *next* stage's tag
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99201'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.jiangsu.gov.cn; no per-article work is
    needed at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_mztjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 江苏省民政厅 (mzt.jiangsu.gov.cn) article pages.

    Extracts title/metadata and the full text from the detail-page HTML,
    queues rows for ``policy_latest`` and ``policy_fulltext_latest``, and
    writes attachment info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//td[@class="title"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发") and contains(text(),"期")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"时") and contains(text(),"效")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # qualify bare "省..." organ names with the province
        organ = '江苏' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99201'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTJIANGSU"
    zt_provider = "mztjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment info (if any) is persisted on the task row as JSON
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省财政厅
def policy_cztjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 江苏省财政厅 (czt.jiangsu.gov.cn).

    On page 1 it fans out the remaining paging tasks in batches of 3 pages
    (25 records per page); for every ``<record>`` entry it emits a
    next-stage article task carrying url/title/pub_date as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total record count from the payload; default to 1 when the marker is missing
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: schedule the paging tasks, 3 pages per task
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant: total page count
            for page in range(1, total_page + 1, 3):
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://czt.jiangsu.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            # the article task runs under the *next* stage's tag
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99202'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.jiangsu.gov.cn; no per-article work is
    needed at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_cztjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 江苏省财政厅 (czt.jiangsu.gov.cn) article pages.

    Extracts title/metadata and the full text from the detail-page HTML,
    queues rows for ``policy_latest`` and ``policy_fulltext_latest``, and
    writes attachment info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="article-title"]//h1//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"时") and contains(text(),"效")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # qualify bare "省..." organ names with the province
        organ = '江苏' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99202'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTJIANGSU"
    zt_provider = "cztjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment info (if any) is persisted on the task row as JSON
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省人力资源和社会保障厅
def policy_jshrssjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 江苏省人力资源和社会保障厅 (jshrss.jiangsu.gov.cn).

    On page 1 it fans out the remaining paging tasks in batches of 3 pages
    (25 records per page); for every ``<record>`` entry it emits a
    next-stage article task carrying url/title/pub_date as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total record count from the payload; default to 1 when the marker is missing
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: schedule the paging tasks, 3 pages per task
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant: total page count
            for page in range(1, total_page + 1, 3):
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://jshrss.jiangsu.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            # the article task runs under the *next* stage's tag
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99203'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[@class="list_title"]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/i/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jshrssjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for jshrss.jiangsu.gov.cn; no per-article work
    is needed at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_jshrssjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 江苏省人力资源和社会保障厅 (jshrss.jiangsu.gov.cn) articles.

    Extracts title/metadata and the full text from the detail-page HTML,
    queues rows for ``policy_latest`` and ``policy_fulltext_latest``, and
    writes attachment info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"文") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"索") and contains(text(),"号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"分") and contains(text(),"类")]/following::div[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"关") and contains(text(),"词")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"时") and contains(text(),"效")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xzjd_table"]//div[contains(text(),"发") and contains(text(),"构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # qualify bare "省..." organ names with the province
        organ = '江苏' + organ

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99203'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSHRSSJIANGSU"
    zt_provider = "jshrssjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment info (if any) is persisted on the task row as JSON
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省农业农村厅
def policy_nynctjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 江苏省农业农村厅 (nynct.jiangsu.gov.cn).

    On page 1 it fans out the remaining paging tasks in batches of 3 pages
    (25 records per page); for every ``<record>`` entry it emits a
    next-stage article task carrying url/title/pub_date as JSON.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # total record count from the payload; default to 1 when the marker is missing
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # first page only: schedule the paging tasks, 3 pages per task
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant: total page count
            for page in range(1, total_page + 1, 3):
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://nynct.jiangsu.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            # the article task runs under the *next* stage's tag
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # some hrefs are already absolute; only prefix relative ones
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document filename without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99204'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('font/text()|span/text()').extract_first().replace('[','').replace(']','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nynct.jiangsu.gov.cn; no per-article work
    is needed at this stage, so an empty DealModel is returned."""
    return DealModel()


def policy_nynctjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 江苏省农业农村厅 (nynct.jiangsu.gov.cn) article pages.

    Extracts title/metadata and the full text from the detail-page HTML,
    queues rows for ``policy_latest`` and ``policy_fulltext_latest``, and
    writes attachment info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//td[@class="title16"]//text()').extract()).strip()
    if not title:
        # fall back to the title captured on the list page
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"主") and contains(text(),"词")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"时") and contains(text(),"效")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # qualify bare "省..." organ names with the province
        organ = '江苏' + organ

    fulltext_xpath = '//span[@class="green12"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99204'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTJIANGSU"
    zt_provider = "nynctjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # attachment info (if any) is persisted on the task row as JSON
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Jiangsu Provincial Department of Housing and Urban-Rural Development
def policy_jsszfhcxjstjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Housing & Urban-Rural Development
    site (XML-style paged list, 25 records per page, fetched 3 pages per
    request).

    On page 1, schedules tasks for the remaining list pages; then parses the
    current response's <record> entries and enqueues one article task each.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string: '\d' in a non-raw literal is an invalid escape sequence
        # (SyntaxWarning on modern CPython)
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages, 3 pages per task,
            # hence start/end record offsets spanning 3 * 25 records.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # clamp the window so the last chunk does not overshoot
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Extract one article task per <record> on the current page.
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://jsszfhcxjst.jiangsu.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension, e.g. ".../abc123.html" -> "abc123"
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99205'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jsszfhcxjstjiangsulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Housing & Urban-Rural Development
    site (HTML table layout, one page per request).

    The total page count is scraped from the ";共N页&" pager text. On page 1,
    tasks for pages 2..N are scheduled; every table row (header skipped)
    becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string: '\d' in a non-raw literal is an invalid escape sequence
        max_count = re.findall(r';共(\d+)页&', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages as new tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # the page number lives in page_index; list_json is reused as-is
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # [1:] skips the table's header row
        li_list = res.xpath('//table[@class="xlt_table0"]/tr')[1:]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href').extract_first()
            base_url = 'http://jsszfhcxjst.jiangsu.gov.cn/col/col52043/index.html?number=02'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = file name without extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99205'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jsszfhcxjstjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback; parsing happens in the ETL step, so no
    follow-up tasks are produced here."""
    return DealModel()


def policy_jsszfhcxjstjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangsu Housing & Urban-Rural Development articles.

    Parses the fetched article HTML into a policy metadata record plus a
    full-text record, and writes any attachment info found in the full-text
    area back onto the crawl row (other_dicts).

    Raises:
        Exception: when the full-text container cannot be located, so the
        task is marked failed instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()|//div[@class="main-fl-tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located via the label text in the preceding <td>
    # (labels matched by partial characters to tolerate spacing variants).
    pub_no = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"生") and contains(text(),"期")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xlt_table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province name.
        organ = '江苏' + organ

    fulltext_xpath = '//div[@class="main_wzy"]|//div[@class="box_wzy_ys"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Bare `raise Exception` gave no diagnostic; include the URL so the
        # failure is traceable. Exception type kept for caller compatibility.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99205'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSSZFHCXJSTJIANGSU"
    zt_provider = "jsszfhcxjstjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info from the full-text area is stored on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Jiangsu Provincial Health Commission
def policy_wjwjiangsulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Jiangsu Provincial Health Commission site
    (XML-style paged list, 25 records per page, fetched 3 pages per request).

    On page 1, schedules tasks for the remaining list pages; then parses the
    current response's <record> entries and enqueues one article task each.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # raw string: '\d' in a non-raw literal is an invalid escape sequence
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages, 3 pages per task,
            # hence start/end record offsets spanning 3 * 25 records.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # clamp the window so the last chunk does not overshoot
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Extract one article task per <record> on the current page.
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://wjw.jiangsu.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99206'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # the date is rendered as "[YYYY-MM-DD]"; strip the brackets
            article_json["pub_date"] = li.xpath('span/text()').extract_first().replace('[','').replace(']','').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwjiangsuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback; parsing happens in the ETL step, so no
    follow-up tasks are produced here."""
    return DealModel()


def policy_wjwjiangsuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jiangsu Provincial Health Commission articles.

    Parses the fetched article HTML into a policy metadata record plus a
    full-text record, and writes any attachment info found in the full-text
    area back onto the crawl row (other_dicts).

    Raises:
        Exception: when the full-text container cannot be located, so the
        task is marked failed instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="main-fl-tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located via the label text in the preceding <td>.
    pub_no = ''.join(res.xpath('//table//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table//td[contains(text(),"主") and contains(text(),"类")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table//td[contains(text(),"生") and contains(text(),"期")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province name.
        organ = '江苏' + organ

    fulltext_xpath = '//div[@class="main-fl bt-left"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Bare `raise Exception` gave no diagnostic; include the URL so the
        # failure is traceable. Exception type kept for caller compatibility.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99206'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWJIANGSU"
    zt_provider = "wjwjiangsucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info from the full-text area is stored on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Nanjing, Jiangsu Province
def policy_nanjinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Nanjing municipal site (JSON API with
    'pageCount' and 'rows').

    On page 1, schedules tasks for all list pages; each row with a
    DOCPUBURL becomes one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])

        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages as new tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # the page number lives in page_index; list_json stays empty
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # 'rows' may be null in the API response; treat that as an empty page.
        li_list = html_json['rows'] or []
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.get('DOCPUBURL', "")
            if not href:
                continue
            base_url = 'https://www.nanjing.gov.cn/zdgk/index.html?id=xxgk_228'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # raw string: '\.' in a non-raw literal is an invalid escape sequence
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99207'
            article_json["url"] = url
            article_json["title"] = li['DOCTITLE']
            article_json["pub_date"] = li['PUBDATE']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nanjinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Nanjing policy-library JSON API ('count'
    plus 'result' rows, 20 records per page).

    On page 1, schedules tasks for all list pages; each result row becomes
    one article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['count']
        total_page = math.ceil(tcount / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue the remaining list pages as new tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # the page number lives in page_index; list_json stays empty
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['result']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['puburl']
            base_url = 'https://www.nanjing.gov.cn/xxgkn/zcfgk/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # raw string: '\.' in a non-raw literal is an invalid escape sequence
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99207'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nanjingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback; parsing happens in the ETL step, so no
    follow-up tasks are produced here."""
    return DealModel()


def policy_nanjingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Nanjing municipal government articles.

    Parses the fetched article HTML into a policy metadata record plus a
    full-text record; attachments found in either the body or the metadata
    table are written back onto the crawl row (other_dicts).

    Raises:
        Exception: when the full-text container cannot be located, so the
        task is marked failed instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)
    # pub_date comes from the on-page metadata table, not from the list page.
    pub_date = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"生成日期")]/following::td[1]//text()').extract()).strip()
    pub_year = pub_date[:4]
    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//div[@class="main_bl left"]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells are located via the label text in the preceding <td>.
    pub_no = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"信息分类")]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"关 键 词")]/following::td[1]//text()').extract()).strip()
    impl_date = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"生效日期")]/following::td[1]//text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"废止日期")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '南京' + organ

    fulltext_xpath = '//div[@class="wenZhang"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Bare `raise Exception` gave no diagnostic; include the URL so the
        # failure is traceable. Exception type kept for caller compatibility.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99207'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NANJING"
    zt_provider = "nanjingcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear in the body or in the metadata table; merge both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//table[@class="t1"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Wuxi, Jiangsu Province
def policy_wuxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Wuxi municipal government site.

    On the first page, schedules tasks for the remaining list pages; then
    parses the current page with one of three layout-specific branches
    (chosen by list_rawid) and enqueues one article task per extracted link.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the response as "pageCount":"N".
        max_count = re.findall('"pageCount":"(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: enqueue tasks for pages 2..total_page; the page
            # number is appended to page_info inside list_json.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'mlxc/gggs' in callmodel.sql_model.list_rawid:
            # Layout 1: <li> items; title in a/h3, date split across two <p>s.
            li_list = res.xpath('//div[@class="fr i_listbox"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'https://www.wuxi.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid = file name without extension
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99208'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/h3/text()').extract_first().strip()
                date1 = li.xpath('div/p[1]/text()').extract_first().strip()
                date2 = li.xpath('div/p[2]/text()').extract_first().strip()
                article_json["pub_date"] = date2 + date1
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        # NOTE(review): this condition duplicates the first branch's
        # ('mlxc/gggs'), so this elif is unreachable dead code — the intended
        # list_rawid marker here is probably a different path; confirm and fix.
        elif 'mlxc/gggs' in callmodel.sql_model.list_rawid:
            # Layout 2: table rows; title/link in h4/a, no date on the list page.
            li_list = res.xpath('//div[@class="list_zhengce"]/table/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('h4/a/@href').extract_first()
                base_url = f'https://www.wuxi.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99208'
                article_json["url"] = url
                article_json["title"] = li.xpath('h4/a/text()').extract_first().strip()
                # pub_date is left empty; the ETL step recovers it from the page
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Default layout: plain <li> list with title in <a> and date in <span>.
            li_list = res.xpath('//div[@class="box_list"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'https://www.wuxi.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99208'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wuxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback; parsing happens in the ETL step, so no
    follow-up tasks are produced here."""
    return DealModel()


def policy_wuxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Wuxi (无锡) policy article pages.

    Parses the downloaded article HTML, extracts metadata from the
    "table_pc" info table and the article headline, then queues rows for
    the ``policy_latest`` and ``policy_fulltext_latest`` tables plus an
    update of attachment info (``other_dicts``) on the source record.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata
        (url/title/pub_date) in ``sql_model.article_json``.
    :raises Exception: when the fulltext container ``div#Zoom`` is missing,
        so the task fails loudly instead of saving an empty article.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # The list page sometimes has no date; fall back to the on-page "公开日期" cell.
    if not pub_date:
        pub_date = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"公开日期")]/following::td[1]/text()').extract()).strip()
        pub_year = pub_date[:4]

    # Prefer the on-page headline; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="article"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"文件编号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"效力状况")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="table_pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "市政府" omit the city; prefix it back in.
    if organ.startswith('市'):
        organ = '无锡' + organ

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99208'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WUXI"
    zt_provider = "wuxicngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can sit both in the article body and in the info table.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="table_pc"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省徐州市
def policy_xzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xuzhou (徐州) policy list pages (HTML).

    On page 1 it fans out insert rows for the remaining pages (15 records
    per page, total count scraped from the page script); on every page it
    parses ``ul#list`` entries into next-stage article rows carrying
    url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count is embedded in an inline script: "total: NNN".
        max_count = re.findall(r'total: (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.xz.gov.cn'
            # Some entries carry absolute urls; only prefix the host onto relative ones.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99209'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xzlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Xuzhou (徐州) policy lists served as JSON.

    The response body is a JSON document with ``custom.total`` (record
    count, 20 per page) and ``custom.data`` (entries). Page 0 fans out
    insert rows for the remaining pages; every page yields next-stage
    article rows carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        total_page = math.ceil(tcount / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Page 0 is the current page, so queue pages 1..total_page-1.
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            base_url = 'http://www.xz.gov.cn'
            # Only prefix the host onto relative links.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99209'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xuzhou; no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_xzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xuzhou (徐州) policy article pages.

    Extracts metadata from the "main-tb" info table and the article
    headline, then queues rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an update of attachment info
    (``other_dicts``) on the source record.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata
        (url/title/pub_date) in ``sql_model.article_json``.
    :raises Exception: when the fulltext container cannot be located,
        so the task fails loudly instead of saving an empty article.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[contains(@class,"ewb-article")]/h3//text()|//h1[@id="ivs_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[contains(@class,"main-tb")]//td[contains(text(),"文号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[contains(@class,"main-tb")]//td[contains(text(),"索引号")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"main-tb")]//td[contains(text(),"成文日期")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"main-tb")]//td[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    # Organ names like "市政府" omit the city; prefix it back in.
    if organ.startswith('市'):
        organ = '徐州' + organ

    fulltext_xpath = '//div[contains(@class,"mian-cont")]|//div[contains(@class,"ewb-article-info")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99209'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XZ"
    zt_provider = "xzcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省常州市
def policy_changzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Changzhou (常州) policy list pages.

    Page 1 fans out insert rows for the remaining pages (page count
    scraped from the pager widget). Two page layouts exist, selected by
    ``list_rawid``: a "border2 f15" table or a "box3" table (whose first
    row is a header and is skipped).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager widget exposes the total page count directly.
        max_count = re.findall(r'Fx_PageDiv2_1_4">(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'ns_class/zwgk_10_18_01' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//table[@class="border2 f15"]/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = 'http://www.changzhou.gov.cn'
                url = base_url + href
                rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99210'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()|td[1]/a/font/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # First row of the "box3" table is a header; skip it.
            li_list = res.xpath('//table[@class="box3"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = 'http://www.changzhou.gov.cn'
                url = base_url + href
                rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99210'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_changzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changzhou; no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_changzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Changzhou (常州) policy article pages.

    Extracts metadata from the "govinfo_frame" info table (with a
    "NewsTitle" fallback for the headline), then queues rows for
    ``policy_latest`` and ``policy_fulltext_latest`` plus an update of
    attachment info (``other_dicts``) on the source record.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata
        (url/title/pub_date) in ``sql_model.article_json``.
    :raises Exception: when the fulltext container cannot be located,
        so the task fails loudly instead of saving an empty article.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: info table first, then the "NewsTitle" block, then the list-stage title.
    title = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"信息名称")]/parent::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="NewsTitle"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"文件编号")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"索") and contains(text(),"号")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"产生日期")]/parent::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"废止日期")]/parent::td[1]/span/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="govinfo_frame"]//span[contains(text(),"发布机构")]/parent::td[1]/text()').extract()).strip()
    # Organ names like "市政府" omit the city; prefix it back in.
    if organ.startswith('市'):
        organ = '常州' + organ

    fulltext_xpath = '//td[@class="GovInfoContent"]|//td[@id="czfxfontzoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99210'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHANGZHOU"
    zt_provider = "changzhoucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省苏州市
def policy_suzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Suzhou (苏州) policy list pages.

    Page 1 fans out insert rows for the remaining pages; the pager format
    (query string vs. ``_N.shtml`` suffix) and the list layout are both
    selected by ``list_rawid``. Each entry becomes a next-stage article
    row carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Two pager variants: a createPageHTML() script call or a "共 N 页" label.
        max_count = re.findall(r"createPageHTML\('page_div'(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r"共&nbsp;(\d+)&nbsp;页", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'channel_id' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentPage={page}"}
                else:
                    dic = {"page_info": f"_{page}.shtml"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'channel_id' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//tr[@class="tr_main_value_odd"]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = 'http://www.suzhou.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid = the file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99211'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zdly_zcjd_wzjd' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="zdly-zcjd-wzjd zdly-list-col2"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div/h4/a/@href').extract_first()
                base_url = 'https://www.suzhou.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99211'
                article_json["url"] = url
                article_json["title"] = li.xpath('div/h4/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('dl/dd[2]/text()|div/dl/dd[2]/text()').extract_first().replace('发布日期：', '').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="pageList infoList listContent"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('h4/a/@href').extract_first()
                base_url = 'https://www.suzhou.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99211'
                article_json["url"] = url
                article_json["title"] = li.xpath('h4/a/text()').extract_first().strip()
                # 'sapn' looks like a typo for 'span'; the original path is kept
                # first in case the site's markup really uses it -- TODO confirm.
                article_json["pub_date"] = li.xpath('h4/sapn/text()|h4/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Suzhou (苏州) policy lists served as JSON.

    The response body is a JSON document with ``data.allRow`` (record
    count, 15 per page) and ``data.list`` (entries). Page 1 fans out
    insert rows for all pages (duplicates are absorbed by the
    insert-ignore prefix); every page yields next-stage article rows.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['allRow']
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            # urljoin resolves both absolute and relative entry urls.
            base_url = 'https://www.suzhou.gov.cn/szsrmzf/szfgfxwjk/zfgfxwjk_list.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99211'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['gbTimeFormatDate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Suzhou; no post-processing needed, return an empty DealModel."""
    return DealModel()


def policy_suzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Suzhou (苏州) policy article pages.

    Extracts metadata from the "meta-main" info block and the article
    headline, then queues rows for ``policy_latest`` and
    ``policy_fulltext_latest`` plus an update of attachment info
    (``other_dicts``) on the source record.

    :param callmodel: carries the fetched html in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata
        (url/title/pub_date) in ``sql_model.article_json``.
    :raises Exception: when the fulltext container ``div#zoomcon`` is
        missing, so the task fails loudly instead of saving an empty article.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="contt"]//text()|//h1[@class="article-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="meta-main"]//font[contains(text(),"文") and contains(text(),"号")]/ancestor::dd[1]/div/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="meta-main"]//font[contains(text(),"索") and contains(text(),"号")]/ancestor::dd[1]/div/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="meta-main"]//font[contains(text(),"分") and contains(text(),"类")]/ancestor::dd[1]/div//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="meta-main"]//font[contains(text(),"时") and contains(text(),"效")]/ancestor::dd[1]/div/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="meta-main"]//font[contains(text(),"发") and contains(text(),"构")]/ancestor::dd[1]/div/text()').extract()).strip()
    # Organ names like "市政府" omit the city; prefix it back in.
    if organ.startswith('市'):
        organ = '苏州' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found for {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99211'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SUZHOU"
    zt_provider = "suzhou"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省南通市
def policy_nantonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the Nantong policy list (XML-in-JSON payload).

    On the first page, fans out pagination tasks (3 pages per task, 10
    records per page, expressed as a [start, end] record window); every
    ``<record>`` entry is queued as an article task carrying
    url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings for regexes avoid invalid-escape DeprecationWarnings.
        max_count = re.findall(r'<totalrecord>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # One task per 3 pages => each covers a window of up to 30 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 10 + 1
                end = (page + 2) * 10
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        res = Selector(text=html_json['result'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            base_url = 'http://www.nantong.gov.cn'
            # BUGFIX: both branches previously did `base_url + href`, so
            # absolute links were double-prefixed into broken URLs.  Keep
            # absolute links as-is (matches the sibling list callbacks).
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99212'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nantonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Nantong search-result table (15 records per page).

    On the first page, queues one pagination task per page; every table
    row is then queued as an article task with url/title/pub_date in
    ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        found = re.findall("totalCount = '(\d+)", html)
        record_total = int(found[0]) if found else 1
        total_page = math.ceil(record_total / 15)
        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                base_row["list_json"] = json.dumps({}, ensure_ascii=False)
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it
        selector = Selector(text=html)
        for row in selector.xpath('//table[@class="table-box"]/tbody/tr'):
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            href = row.xpath('td[2]/a/@href').extract_first()
            # Absolute links pass through; relative ones get the site root.
            url = href if 'http' in href else 'http://www.nantong.gov.cn' + href
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99212'
            article_json = {
                "url": url,
                "title": row.xpath('td[2]/a/text()').extract_first().strip(),
                "pub_date": row.xpath('td[5]/text()').extract_first().strip(),
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)
        result.next_dicts.insert.append(article_model)

    return result


def policy_nantongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing presumably happens in the ETL stage."""
    return DealModel()


def policy_nantongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Nantong policy article page.

    Extracts metadata from the ``table_content`` info table and the
    ``#zoom`` fulltext container, then stages one row each for
    ``policy_latest`` and ``policy_fulltext_latest``.  Attachment info
    found in the fulltext is written back to the source row's
    ``other_dicts``.

    Raises:
        Exception: when the fulltext container is missing, so the task
            fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="title"]//text()|//div[@class="container-main-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list stage.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"文号")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"索引号")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"分类")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"成文日期")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"有效性")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="table_content"]//strong[contains(text(),"发布机构")]/parent::td[1]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" issuers with the city name.
        organ = '南通' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Include the URL so the failed task can be diagnosed from logs.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99212'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NANTONG"
    zt_provider = "nantongcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省连云港市
def policy_lyglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the Lianyungang policy list (XML-in-JSON payload).

    On the first page, fans out pagination tasks (3 pages per task, 10
    records per page); each ``<record>`` becomes an article task.  Links
    may arrive either as a plain ``href`` or as an argument inside a JS
    ``onclick`` handler.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings for regexes avoid invalid-escape DeprecationWarnings.
        max_count = re.findall(r'<totalrecord>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 10)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # One task per 3 pages => each covers a window of up to 30 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 10 + 1
                end = (page + 2) * 10
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        res = Selector(text=html_json['result'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            if 'onclick' in li.extract():
                # Link is passed as the argument of a JS onclick handler.
                href_info = li.xpath('.//a/@onclick').extract_first()
                href = re.findall(r"\('(.*?)'\)", href_info)[0]
            else:
                href = li.xpath('.//a/@href').extract_first()
            base_url = 'http://www.lyg.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99213'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lyglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Lianyungang search-result table (10 records per page).

    On the first page, queues one pagination task per page; every table
    row is then queued as an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        found = re.findall("totalCount = '(\d+)", html)
        record_total = int(found[0]) if found else 1
        total_page = math.ceil(record_total / 10)
        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                base_row["list_json"] = json.dumps({}, ensure_ascii=False)
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it
        selector = Selector(text=html)
        search_url = 'http://www.lyg.gov.cn/TrueCMS/searchController/toSearch/147d8e37-a461-4dbc-863d-0fa52d92bb44/xxgk.do'
        for row in selector.xpath('//table[@class="table-box"]/tbody/tr'):
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            href = row.xpath('td[2]/a/@href').extract_first()
            # Resolve relative links against the search endpoint URL.
            url = parse.urljoin(search_url, href)
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99213'
            article_json = {
                "url": url,
                "title": row.xpath('td[2]/a/text()').extract_first().strip(),
                "pub_date": row.xpath('td[5]/text()').extract_first().strip(),
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)
        result.next_dicts.insert.append(article_model)

    return result


def policy_lyglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Lianyungang JSON list feed (18 records per page).

    On the first page, queues one pagination task per page; every item
    under ``content`` is then queued as an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        payload = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(payload['fyParams']['count'] / 18)
        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                base_row["list_json"] = json.dumps({}, ensure_ascii=False)
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it

        for item in payload['content']:
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            href = item['htmlpath']
            # Absolute links pass through; relative ones get the site root.
            url = href if 'http' in href else 'http://www.lyg.gov.cn' + href
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99213'
            article_json = {
                "url": url,
                "title": item['msgtitle'],
                "pub_date": item['releasetime'],
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)
        result.next_dicts.insert.append(article_model)

    return result


def policy_lygarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing presumably happens in the ETL stage."""
    return DealModel()


def policy_lygarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for a Lianyungang policy article page.

    Extracts metadata from the ``tab-page`` info table (two page layouts
    are handled via cascading title XPaths and a two-stage fulltext
    lookup), then stages one row each for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info found in the fulltext is
    written back to the source row's ``other_dicts``.

    Raises:
        Exception: when no fulltext container is found, so the task
            fails visibly instead of saving an empty record.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"标")and contains(text(),"题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[contains(@class,"main-page")]/h1/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@id="liebiao"]//div[contains(@class,"main-page")]/div[1]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list stage.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"文")and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"索")and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"分")and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"主")and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"时")and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"发")and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" issuers with the city name.
        organ = '连云港' + organ

    fulltext_xpath = '//div[@id="liebiao"]//div[@class="zhengwen_div"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Older layout keeps the body in a #zoom container instead.
        fulltext_xpath = '//div[@id="zoom"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Include the URL so the failed task can be diagnosed from logs.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99213'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LYG"
    zt_provider = "lygcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省淮安市
def policy_huaianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Huai'an policy list page (15 records per page).

    On the first page, queues one pagination task per page, appending
    the page number to ``page_info``; each ``li`` item is then queued as
    an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        found = re.findall("nsetpage\(\d+,(\d+)", html)
        record_total = int(found[0]) if found else 1
        total_page = math.ceil(record_total / 15)

        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                base_row["list_json"] = json.dumps(
                    {"page_info": f"{list_json['page_info']}_{page_no}"},
                    ensure_ascii=False)
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it
        selector = Selector(text=html)
        # Resolve relative links against the list page's own URL.
        page_url = f'http://www.huaian.gov.cn/{callmodel.sql_model.list_rawid}.html'
        for row in selector.xpath('//div[@class="lb-lb"]/ul/li'):
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            href = row.xpath('a/@href').extract_first()
            url = parse.urljoin(page_url, href)
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99214'
            article_json = {
                "url": url,
                "title": row.xpath('a/text()').extract_first().strip(),
                "pub_date": row.xpath('div/text()').extract_first().strip(),
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)

        result.next_dicts.insert.append(article_model)

    return result


def policy_huaianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Huai'an JSON list feed (``value.list``, 10 records per page).

    On the first page, queues one pagination task per page, reusing the
    original ``list_json``; every list item is then queued as an article
    task with its ``domain`` + ``mpath`` URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        payload = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(payload['value']['total'] / 10)
        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                # Pagination tasks keep the original list_json untouched.
                base_row["list_json"] = callmodel.sql_model.list_json
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it

        for item in payload['value']['list']:
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            url = item['domain'] + item['mpath']
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99214'
            article_json = {
                "url": url,
                "title": item['title'],
                # Keep only the date part of the timestamp.
                "pub_date": item['release_time'][:10],
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)
        result.next_dicts.insert.append(article_model)

    return result


def policy_huaianlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Huai'an JSON list feed variant (``path``/``releaseTime`` keys).

    Same flow as :func:`policy_huaianlist1_callback` but the item schema
    differs: URL path comes from ``path`` and the publish date from
    ``releaseTime`` (used as-is).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        payload = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = math.ceil(payload['value']['total'] / 10)
        if int(callmodel.sql_model.page_index) == 1:
            page_model = DealInsertModel()
            page_model.insert_pre = CoreSqlValue.insert_ig_it
            base_row = deal_sql_dict(callmodel.sql_model.dict())
            list_json = json.loads(callmodel.sql_model.list_json)
            for page_no in range(1, total_page + 1):
                base_row["page"] = total_page
                base_row["page_index"] = page_no
                # Pagination tasks keep the original list_json untouched.
                base_row["list_json"] = callmodel.sql_model.list_json
                page_model.lists.append(base_row.copy())
            result.befor_dicts.insert.append(page_model)
        article_model = DealInsertModel()
        article_model.insert_pre = CoreSqlValue.insert_ig_it

        for item in payload['value']['list']:
            entry = info_dicts.copy()
            entry["task_tag"] = entry.pop("task_tag_next")
            url = item['domain'] + item['path']
            if 'htm' not in url:
                continue
            entry["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
            entry["sub_db_id"] = '99214'
            article_json = {
                "url": url,
                "title": item['title'],
                "pub_date": item['releaseTime'],
            }
            entry["article_json"] = json.dumps(article_json, ensure_ascii=False)
            article_model.lists.append(entry)
        result.next_dicts.insert.append(article_model)

    return result


def policy_huaianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: parsing presumably happens in the ETL stage."""
    return DealModel()


def policy_huaianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Huai'an (淮安) policy article pages.

    Extracts metadata (title, document number, index number, subject,
    issuing organ) and the full text from the downloaded HTML, queues the
    rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables, and
    writes attachment info back onto the source record.

    Raises:
        Exception: when no known fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="bt"]//text()|//div[@class="nr-bt"]//text()|//h1[@class="bt"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Two page templates exist: a div-based "meta-data" layout and a table layout.
    if 'meta-data' in html:
        pub_no = ''.join(res.xpath('//div[@id="ysj"]//font[contains(text(),"文") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"索") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"主") and contains(text(),"类")]/following::div[1]/text()').extract()).strip()
        subject_word = ''
        organ = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"发") and contains(text(),"构")]/following::div[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//tr[@id="ysj"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="wk1"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="wk1"]//td[contains(text(),"主") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
        subject_word = ''.join(res.xpath('//table[@class="wk1"]//td[contains(text(),"关") and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="wk1"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    # Fallback 1: the "来源" (source) line; accept it only when it looks like an organ name.
    if not organ:
        organ = ''.join(res.xpath('//span[@id="origin"]/text()').extract()).strip()
        organ = organ.replace('来源：', '').strip()
        if not (organ.endswith('局') or organ.endswith('会') or organ.endswith('办公室')):
            organ = ''
    # Fallback 2: try to pull the organ name out of the title.
    if not organ:
        organ_info = re.findall(r'市.{1,10}?(局|委员会|办公室)', title)
        organ = organ_info[0] if organ_info else ''
    if organ.startswith('市'):
        organ = '淮安' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@class="wz3"]|//div[@id="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task explicitly so the page can be retried or inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99214'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HUAIAN"
    zt_provider = "huaiancngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (if any) back on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省盐城市
def policy_yanchenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yancheng (盐城) policy documents.

    Parses the ``<record>`` XML list, fans out the remaining list pages on
    the first page (the source serves 3 pages / 75 records per request),
    and queues one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count; default to a single record when the marker is missing.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step 3 pages at a time: each request covers a 75-record window.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.yancheng.gov.cn'
            # Some records already carry an absolute URL.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99215'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()|text()').extract_first().replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yanchenglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yancheng's table-style (zfxxgk) list pages.

    Reads the total page count from the pagination text, fans out pages
    2..N on the first page (page 1 is the response being parsed), and
    queues one article task per table row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count; default to a single page when the marker is missing.
        max_count = re.findall(r';共(\d+)页&', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//form[@id="searchform"]//following::table[1]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = 'http://www.yancheng.gov.cn/col/col23437/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99215'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[2]/text()').extract_first().replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yanchengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yancheng; no extra scheduling work is needed."""
    return DealModel()


def policy_yanchengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yancheng (盐城) policy article pages.

    Extracts metadata from the ``xxgk_table`` layout and the full text from
    the downloaded HTML, queues rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and writes attachment info back onto the
    source record.

    Raises:
        Exception: when the fulltext container is not found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"组配分类")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"时效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '盐城' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task explicitly so the page can be retried or inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99215'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YANCHENG"
    zt_provider = "yanchenggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (if any) back on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省扬州市
def policy_yangzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yangzhou (扬州) policy documents (JSON API).

    Reads the total record count from the JSON payload, fans out the
    remaining list pages on page 1, and queues one article task per item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['allRow']
        total_page = math.ceil(tcount / 20)  # the API serves 20 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['MURL']
            base_url = 'http://www.yangzhou.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99216'
            article_json["url"] = url
            article_json["title"] = li['TITLE']
            article_json["pub_date"] = li['PTIME']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yangzhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yangzhou's paged JSON API variant.

    Same flow as ``policy_yangzhoulist_callback`` but the payload nests the
    count and item list under ``data.page`` and links use ``URL_COMP``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['data']['page']['allRow']
        total_page = math.ceil(tcount / 20)  # the API serves 20 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']['page']['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['URL_COMP']
            base_url = 'http://www.yangzhou.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99216'
            article_json["url"] = url
            article_json["title"] = li['TITLE']
            article_json["pub_date"] = li['PTIME']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yangzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yangzhou; no extra scheduling work is needed."""
    return DealModel()


def policy_yangzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yangzhou (扬州) policy article pages.

    Extracts metadata from the ``xxgksyh`` info block and the full text
    from the downloaded HTML, queues rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and writes attachment info back onto the
    source record.

    Raises:
        Exception: when the fulltext container is not found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="detail-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"文　　号")]/ancestor::dd[1]/div/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"索 引 号")]/ancestor::dd[1]/div/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"主题分类")]/ancestor::dd[1]/div/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"关 键 词")]/ancestor::dd[1]/div/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"生成日期")]/ancestor::dd[1]/div/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"时　　效")]/ancestor::dd[1]/div/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@id="xxgksyh"]//font[contains(text(),"发布机构")]/ancestor::dd[1]/div/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '扬州' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task explicitly so the page can be retried or inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99216'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YANGZHOU"
    zt_provider = "yangzhoucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (if any) back on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  江苏省镇江市
def policy_zhenjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zhenjiang (镇江) policy documents.

    Reads the total page count from the ``createPageHTML`` pagination
    script, fans out pages 2..N on the first page, and queues one article
    task per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count; default to a single page when the marker is missing.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Page 1 is already fetched, so fan out pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="pageList"]//ul/li|//ul[@class="pageList newsList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.zhenjiang.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99217'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhenjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhenjiang; no extra scheduling work is needed."""
    return DealModel()


def policy_zhenjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhenjiang (镇江) policy article pages.

    Extracts metadata from the ``meta-data`` info block and the full text
    from the downloaded HTML, queues rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and writes attachment info back onto the
    source record.

    Raises:
        Exception: when the fulltext container is not found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="detail-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"文") and contains(text(),"号")]/ancestor::dd[1]/div/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"索") and contains(text(),"号")]/ancestor::dd[1]/div/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"分") and contains(text(),"类")]/ancestor::dd[1]/div//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"时") and contains(text(),"效")]/ancestor::dd[1]/div/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="meta-data"]//font[contains(text(),"发") and contains(text(),"构")]/ancestor::dd[1]/div/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '镇江' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task explicitly so the page can be retried or inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99217'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZHENJIANG"
    zt_provider = "zhenjiangcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (if any) back on the source row's other_dicts column.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   江苏省泰州市
def policy_taizhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Taizhou (Jiangsu) policy list response (XML ``<record>`` feed).

    On page 1 this seeds the remaining list-page tasks in steps of 3 pages:
    each task carries a start/end record window (25 records per page, so up
    to 75 records per fetch), clamped to the total record count. Every
    ``<record>`` link that looks like an article page ('htm' in the URL) is
    emitted as a next-stage article task with url/title/pub_date captured
    in ``article_json``.

    :param callmodel: callback context carrying the fetched page and task row.
    :return: DealModel with pagination inserts (befor) and article inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count from the XML envelope; the site pages by 25.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Each task requests a 3-page (75-record) window, clamped to max_count.
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json["page_info"],
                       "webid": list_json["webid"], "webname": list_json["webname"]}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # List id 392 lives on the main portal; the rest on the zwgk host.
            if '392' == callmodel.sql_model.list_rawid:
                base_url = 'http://www.taizhou.gov.cn'
            else:
                base_url = 'http://zwgk.taizhou.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99218'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taizhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse the Taizhou zfxxgk policy list (a JSON envelope wrapping HTML).

    The page count comes from the ``count=".."`` attribute inside the embedded
    HTML (15 records per page). On page 1 the remaining pages are seeded as
    list tasks reusing the original ``list_json``; each article link found on
    the current page becomes a next-stage task.

    :param callmodel: callback context carrying the fetched page and task row.
    :return: DealModel with pagination inserts (befor) and article inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        max_count = re.findall(r'count=\"(\d+)', html_json['data']['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Pagination reuses the original list_json unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=html_json['data']['html'])
        li_list = res.xpath('//div[@class="zfxxgknr-content"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'https://www.taizhou.gov.cn/zfxxgk/fdzdgknr/zcwj/qtwj/szfbwj/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99218'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taizhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article fetch stage for Taizhou; parsing happens in the ETL callback."""
    return DealModel()


def policy_taizhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Taizhou policy article page into policy_latest / policy_fulltext_latest.

    Metadata (pub_no, index_no, subject, legal_status, organ) is read from the
    page's metadata table, whose class is ``xxgk-table`` on some pages and
    ``xxgk_table`` on others; the cell layout is the same in both. Attachment
    info found in the fulltext region is written back to the originating task
    row via ``other_dicts``.

    :param callmodel: callback context with the fetched article HTML and task row.
    :return: EtlDealModel carrying the rows to save plus the task-row update.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="sp_title"]//text()|//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    # Labels may contain filler whitespace, so each one is matched by its
    # first and last character rather than the full text.
    table_class = 'xxgk-table' if 'class="xxgk-table"' in html else 'xxgk_table'

    def _meta(first_char, last_char):
        # Value lives in the <td> immediately following the label cell.
        xp = (f'//table[@class="{table_class}"]//td[contains(text(),"{first_char}")'
              f' and contains(text(),"{last_char}")]/following::td[1]//text()')
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('文', '号')
    index_no = _meta('索', '号')
    subject = _meta('分', '类')
    legal_status = _meta('有', '性')
    organ = _meta('发', '构')
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '泰州' + organ

    fulltext_xpath = '//div[@class="main-txt"]|//div[@id="zoom"]|//div[@class="wzzw-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by xpath: {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99218'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TAIZHOU"
    zt_provider = "taizhoucngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  江苏省宿迁市
def policy_suqianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Suqian (Jiangsu) policy list page.

    The page count is taken from the ``createPageHTML('page_div', N`` pager
    call. On page 1 the remaining pages are seeded as list tasks addressed as
    ``<page_info>_<page>``; every article link on the current page becomes a
    next-stage task.

    :param callmodel: callback context carrying the fetched page and task row.
    :return: DealModel with pagination inserts (befor) and article inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages are addressed as "<page_info>_<page>".
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="channelList"]//ul/li|//ul[@class="listContent"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.suqian.gov.cn'
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99219'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # Strip any surrounding square brackets from the date text.
            article_json["pub_date"] = li.xpath('span/text()').extract_first().replace('[', '').replace(']', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suqianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article fetch stage for Suqian; parsing happens in the ETL callback."""
    return DealModel()


def policy_suqianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Suqian policy article page into policy_latest / policy_fulltext_latest.

    Metadata is read from the ``table_content`` table; the fulltext container
    is ``div#zoomcon``. Attachment info found in the fulltext region is written
    back to the originating task row via ``other_dicts``.

    :param callmodel: callback context with the fetched article HTML and task row.
    :return: EtlDealModel carrying the rows to save plus the task-row update.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="zoomtitle"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata cells: the value is the <td> immediately after the label cell.
    pub_no = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"分类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"关键词")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"时效")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="table_content"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '宿迁' + organ

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by xpath: {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99219'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SUQIAN"
    zt_provider = "suqiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市发展和改革委员会
def policy_fgwshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Shanghai Development & Reform Commission policy list page.

    The page count comes from the ``totalPage: N`` snippet; for the
    ``fgw_zcwjfl`` list it is overridden with a hard-coded 95 pages. On page 1
    the remaining pages are seeded as list tasks addressed as
    ``<page_info>_<page>``; every article link becomes a next-stage task.

    :param callmodel: callback context carrying the fetched page and task row.
    :return: DealModel with pagination inserts (befor) and article inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        if 'fgw_zcwjfl' == callmodel.sql_model.list_rawid:
            # Hard-coded page-count override for this list.
            max_count = 95
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages are addressed as "<page_info>_<page>".
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"news-list")]/li|//ul[@class="zzwj-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'https://fgw.sh.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99220'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|p/span[contains(text(),"发布日期：")]/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article fetch stage for Shanghai DRC; parsing happens in the ETL callback."""
    return DealModel()


def policy_fgwsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Shanghai DRC policy article page into the policy tables.

    Only title and pub_no are scraped from the page; other metadata fields are
    not available here. Attachment info is collected from both the fulltext
    region and the ``table_content`` table and written back to the originating
    task row via ``other_dicts``.

    :param callmodel: callback context with the fetched article HTML and task row.
    :return: EtlDealModel carrying the rows to save plus the task-row update.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # The document number span carries a "文号：" label prefix; drop it.
    pub_no = ''.join(res.xpath('//span[contains(text(),"文号：")]//text()').extract()).strip()
    pub_no = pub_no.replace('文号：', '')

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by xpath: {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99220'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWSH"
    zt_provider = "fgwshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can appear in the body and in the metadata table; merge both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//table[@class="table_content"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市经济和信息化委员会
def policy_sheitcshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Shanghai Economy & Informatization Commission policy list page.

    The page count comes from the ``totalPage: N`` snippet. On page 1 the
    remaining pages are seeded as list tasks addressed as
    ``<page_info>_<page>``; every article link becomes a next-stage task.

    :param callmodel: callback context carrying the fetched page and task row.
    :return: DealModel with pagination inserts (befor) and article inserts (next).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Subsequent pages are addressed as "<page_info>_<page>".
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="j-list-ul"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.sheitc.sh.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99221'
            article_json["url"] = url
            article_json["title"] = li.xpath('a//h2/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a//span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_sheitcsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article fetch stage for Shanghai SHEITC; parsing happens in the ETL callback."""
    return DealModel()


def policy_sheitcsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Shanghai SHEITC policy article page into the policy tables.

    Only title and pub_date are extracted; when the list-page date is missing
    or zeroed, the on-page ``span#ivs_date`` value is used instead. Attachment
    info found in the fulltext region is written back to the originating task
    row via ``other_dicts``.

    :param callmodel: callback context with the fetched article HTML and task row.
    :return: EtlDealModel carrying the rows to save plus the task-row update.
    :raises Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[contains(@class,"text-tit")]/div[1]/text()|//h1[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    if not pub_date or '0000' in pub_date:
        # The list page gave no usable date; take the one printed on the article.
        pub_date_info = res.xpath('//span[@id="ivs_date"]/text()').extract_first()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    fulltext_xpath = '//div[@class="text-main fs-2"]|//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by xpath: {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99221'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHEITCSH"
    zt_provider = "sheitcshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the originating task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市科学技术委员会 (Shanghai Science and Technology Commission)
def policy_stcsmshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for stcsm.sh.gov.cn policy listings.

    On the first page, fans out crawl tasks for pages 2..total_page; on every
    page, extracts article links and queues one article-stage task per entry.

    Args:
        callmodel: framework callback model carrying the fetched HTML in
            ``para_dicts["data"]["1_1"]["html"]`` and the source DB row.

    Returns:
        DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS as "totalPage: N".
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="yjsnews"]/li|//ul[@class="lis_bd common"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # <li> without a link (e.g. separator) -- skip instead of crashing.
                continue
            base_url = f'https://stcsm.sh.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99222'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('a//text()').extract()).strip()
            article_json["pub_date"] = li.xpath('span[@class="date"]/text()|span[@class="f_r xxgktime"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_stcsmsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for stcsm.sh.gov.cn; parsing happens in the ETL stage."""
    return DealModel()


def policy_stcsmsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for stcsm.sh.gov.cn policy articles.

    Parses the downloaded article page, builds metadata / fulltext records
    for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info back on the crawl row (``other_dicts``).

    Raises:
        Exception: if the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@id="ivs_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99222'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "STCSMSH"
    zt_provider = "stcsmshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links (if any) are kept on the source row as JSON.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市教育委员会 (Shanghai Municipal Education Commission)
def policy_edushlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for edu.sh.gov.cn policy listings.

    On the first page, fans out crawl tasks for pages 2..total_page; on every
    page, extracts article links and queues one article-stage task per entry.

    Args:
        callmodel: framework callback model carrying the fetched HTML in
            ``para_dicts["data"]["1_1"]["html"]`` and the source DB row.

    Returns:
        DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS as "totalPage: N".
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="listContent"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # <li> without a link (e.g. separator) -- skip instead of crashing.
                continue
            base_url = f'https://edu.sh.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99223'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_edusharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for edu.sh.gov.cn; parsing happens in the ETL stage."""
    return DealModel()


def policy_edusharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for edu.sh.gov.cn policy articles.

    Parses the downloaded article page, builds metadata / fulltext records
    for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info back on the crawl row (``other_dicts``).

    Raises:
        Exception: if the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@id="ivs_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99223'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EDUSH"
    zt_provider = "edushgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live in the body or in the "ivs_wszqyj" panel; merge both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@id="ivs_wszqyj"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市民政局 (Shanghai Civil Affairs Bureau)
def policy_mzjshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzj.sh.gov.cn policy listings.

    On the first page, fans out crawl tasks for pages 2..total_page; on every
    page, extracts article links and queues one article-stage task per entry.
    The pub-date location varies by list section (``list_rawid``).

    Args:
        callmodel: framework callback model carrying the fetched HTML in
            ``para_dicts["data"]["1_1"]["html"]`` and the source DB row.

    Returns:
        DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Two pager variants exist: "totalPage: N" and a setPage(...) call.
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'setPage",\d+,(\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//ul[contains(@class,"list-square")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # <li> without a link (e.g. separator) -- skip instead of crashing.
                continue
            base_url = f'https://mzj.sh.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99224'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            # The pub-date element differs per list section.
            if 'mz-zcwj' == list_rawid:
                pub_date = li.xpath('p/span[2]/text()').extract_first().replace('发布日期：', '').strip()
            elif list_rawid in ('MZ_zhuzhan2739_0-2-8-15-55', 'mz-zxxxgk'):
                pub_date = li.xpath('span[3]/text()').extract_first().strip()
            else:
                pub_date = li.xpath('span/text()').extract_first().strip()
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzjsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzj.sh.gov.cn; parsing happens in the ETL stage."""
    return DealModel()


def policy_mzjsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzj.sh.gov.cn policy articles.

    Parses the downloaded article page (title, document number, index number,
    implementation date, legal status, issuing organ), builds metadata /
    fulltext records for ``policy_latest`` and ``policy_fulltext_latest``,
    and stores any attachment info back on the crawl row (``other_dicts``).

    Raises:
        Exception: if the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table: each label cell is followed by its value cell.
    pub_no = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"文件编号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"索取号")]/following::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"实施日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # A bare "市..." organ is ambiguous across cities; qualify with Shanghai.
        organ = '上海' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99224'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZJSH"
    zt_provider = "mzjshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links (if any) are kept on the source row as JSON.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市财政局 (Shanghai Finance Bureau)
def policy_czjshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czj.sh.gov.cn policy listings.

    On the first page, fans out crawl tasks for pages 2..total_page; on every
    page, extracts article links and queues one article-stage task per entry.
    The 'zss/zfxx/zcfg' section uses a table layout; every other section uses
    a <ul> list -- only the xpaths differ, so one shared loop handles both.

    Args:
        callmodel: framework callback model carrying the fetched HTML in
            ``para_dicts["data"]["1_1"]["html"]`` and the source DB row.

    Returns:
        DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's JS as "totalPage: N".
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        base_url = f'https://czj.sh.gov.cn'
        if 'zss/zfxx/zcfg' == list_rawid:
            # Table layout: link + title in column 1, pub date in column 3.
            rows = res.xpath('//table[@id="dataList"]/tr')
            href_xp, title_xp, date_xp = 'td[1]/a/@href', 'td[1]/a/text()', 'td[3]/text()'
        else:
            rows = res.xpath('//ul[@class="uli14 pageList"]/li')
            href_xp, title_xp, date_xp = 'a/@href', 'a/text()', 'span/text()'
        for row in rows:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = row.xpath(href_xp).extract_first()
            if not href:
                # Row without a link (e.g. header/separator) -- skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            article_json = dict()
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99225'
            article_json["url"] = url
            article_json["title"] = row.xpath(title_xp).extract_first().strip()
            article_json["pub_date"] = row.xpath(date_xp).extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czjsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czj.sh.gov.cn; parsing happens in the ETL stage."""
    return DealModel()


def policy_czjsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for czj.sh.gov.cn policy articles.

    Parses the downloaded article page, builds metadata / fulltext records
    for ``policy_latest`` and ``policy_fulltext_latest``, and stores any
    attachment info back on the crawl row (``other_dicts``).

    Raises:
        Exception: if the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title on the article page; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found at {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99225'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZJSH"
    zt_provider = "czjshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live in the body or in the "margin-top20" panel; merge both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="margin-top20"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市人力资源和社会保障局
def policy_rsjshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanghai Municipal Human Resources and
    Social Security Bureau (rsj.sh.gov.cn).

    On the first page, enqueues list tasks for the remaining pages; on every
    page, extracts article links into next-stage insert rows (sub_db_id 99226).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://rsj.sh.gov.cn'

        def _append_item(href, item_title, item_pub_date):
            # Build one next-stage row from a single list entry; entries whose
            # URL is not an html page are skipped.
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                return
            # rawid is the html file name without its extension.
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99226'
            article_json = {
                "url": url,
                "title": item_title.strip(),
                "pub_date": item_pub_date.strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        if callmodel.sql_model.list_rawid == 'tflfg_17253_17253':
            # This one column renders as a table instead of the usual <ul> list.
            for li in res.xpath('//table[@class="table control-table"]/tbody/tr'):
                _append_item(li.xpath('td[1]/a/@href').extract_first(),
                             li.xpath('td[1]/a/text()').extract_first(),
                             li.xpath('td[3]/text()').extract_first())
        else:
            for li in res.xpath('//ul[contains(@class,"uli14")]/li'):
                _append_item(li.xpath('a/@href').extract_first(),
                             li.xpath('a/text()').extract_first(),
                             li.xpath('span/text()').extract_first())
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rsjsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rsj.sh.gov.cn; no per-article processing
    happens here, so an empty deal model is returned."""
    return DealModel()


def policy_rsjsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for rsj.sh.gov.cn article pages (sub_db_id 99226).

    Parses title and fulltext from the detail page, falling back to the
    list-page metadata stored in ``article_json``, then emits rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables plus an
    ``other_dicts`` update carrying any attachment info.

    Raises:
        Exception: when the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so a failed extraction is diagnosable in logs.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99226'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSJSH"
    zt_provider = "rsjshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市农业农村委员会
def policy_nyncwshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanghai Municipal Agriculture and Rural
    Affairs Commission (nyncw.sh.gov.cn).

    On the first page, enqueues list tasks for the remaining pages; on every
    page, extracts article links into next-stage insert rows (sub_db_id 99227).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://nyncw.sh.gov.cn'
        for li in res.xpath('//ul[contains(@class,"list-show-all")]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            # Absolute links are kept as-is; relative ones join the site root.
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the html file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99227'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncwsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nyncw.sh.gov.cn; no per-article processing
    happens here, so an empty deal model is returned."""
    return DealModel()


def policy_nyncwsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for nyncw.sh.gov.cn article pages (sub_db_id 99227).

    Parses title, document metadata (doc number, subject, organ, ...) and
    fulltext from the detail page, then emits rows for ``policy_latest`` /
    ``policy_fulltext_latest`` plus an ``other_dicts`` update carrying any
    attachment info.

    Raises:
        Exception: when the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata fields live in the "article-info" header block as label/value
    # span pairs.
    pub_no = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"主题词")]/following::span[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"失效日期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"发文机关")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organ names omit the city; prefix "上海" (Shanghai).
        organ = '上海' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so a failed extraction is diagnosable in logs.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99227'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCWSH"
    zt_provider = "nyncwshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市住房和城乡建设管理委员会
def policy_zjwshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanghai Municipal Commission of Housing and
    Urban-Rural Development (zjw.sh.gov.cn).

    On the first page, enqueues list tasks for the remaining pages; on every
    page, extracts article links into next-stage insert rows (sub_db_id 99228).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Relative links resolve against the column's own index page.
        base_url = f'https://zjw.sh.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        # Two list layouts exist on this site; cover both with one union xpath.
        for li in res.xpath('//ul[contains(@class,"list-date")]/li|//ul[@class="zzwj-list"]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the html file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99228'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()|p/span[@class="col-sm-3"]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjwsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjw.sh.gov.cn; no per-article processing
    happens here, so an empty deal model is returned."""
    return DealModel()


def policy_zjwsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for zjw.sh.gov.cn article pages (sub_db_id 99228).

    Parses title and fulltext from the detail page, falling back to the
    list-page metadata stored in ``article_json``, then emits rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables plus an
    ``other_dicts`` update carrying any attachment info.

    Raises:
        Exception: when the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so a failed extraction is diagnosable in logs.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99228'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJWSH"
    zt_provider = "zjwshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    # NOTE(review): pub_date was already cleaned above; this second pass
    # assumes clean_pubdate is idempotent — confirm before simplifying.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市卫生健康委员会
def policy_wsjkwshlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shanghai Municipal Health Commission
    (wsjkw.sh.gov.cn).

    On the first page, enqueues list tasks for the remaining pages; on every
    page, extracts article links into next-stage insert rows (sub_db_id 99229).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://wsjkw.sh.gov.cn'
        for li in res.xpath('//ul[contains(@class,"list-date")]/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            # Absolute links are kept as-is; relative ones join the site root.
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the html file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99229'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwsharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.sh.gov.cn; no per-article processing
    happens here, so an empty deal model is returned."""
    return DealModel()


def policy_wsjkwsharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wsjkw.sh.gov.cn article pages (sub_db_id 99229).

    Parses title, document metadata (index number, organ, doc number) and
    fulltext from the detail page, then emits rows for ``policy_latest`` /
    ``policy_fulltext_latest`` plus an ``other_dicts`` update carrying any
    attachment info.

    Raises:
        Exception: when the fulltext container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata lives in fixed columns of the "table-article" header table.
    pub_no = ''.join(res.xpath('//table[@class="table-article"]/tbody/tr/td[4]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="table-article"]/tbody/tr/td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="table-article"]/tbody/tr/td[3]/text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organ names omit the city; prefix "上海" (Shanghai).
        organ = '上海' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Raise with context so a failed extraction is diagnosable in logs.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99229'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWSH"
    zt_provider = "wsjkwshgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市徐汇区
def policy_xuhuilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Xuhui district portal (www.xuhui.gov.cn).

    Pagination is zero-based here: the total page count is parsed only on
    page 0, which then enqueues list tasks for pages 1..total_page-1. Every
    page's article links become next-stage insert rows (sub_db_id 99231).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
            max_count = re.findall(r">共(\d+)页", para_dicts["data"]["1_1"]['html'])
            total_page = int(max_count[0]) if max_count else 1

            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The same list_json works for every page; only page_index varies.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://www.xuhui.gov.cn/xxgk/portal/article/list'
        for li in res.xpath('//div[@class="pag_wrap"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'id=' not in url:
                continue
            # rawid is the article id taken from the query string.
            temp["rawid"] = url.split('id=')[-1]
            temp["sub_db_id"] = '99231'
            article_json = {
                "url": url,
                "title": li.xpath('a/p/text()').extract_first().strip(),
                "pub_date": li.xpath('a/b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xuhuiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xuhui district; no extra processing is needed."""
    return DealModel()


def policy_xuhuiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Xuhui district policy articles.

    Parses the downloaded article HTML, extracts metadata (title, publish
    date, document number, index number, subject, issuing organ) and the
    full text, queues rows for the `policy_latest` /
    `policy_fulltext_latest` tables, and writes attachment info back onto
    the source row.

    Raises:
        Exception: if the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'info_box ' in html:
        # Layout variant 1: metadata in labelled <span> elements ("label : value").
        pub_no = ''.join(res.xpath('//span[contains(text(),"发文字号 :")]//text()').extract()).strip()
        pub_no = pub_no.split(':')[-1].replace('-', '').strip()
        index_no = ''.join(res.xpath('//span[contains(text(),"索引号 :")]//text()').extract()).strip()
        index_no = index_no.split(':')[-1].replace('-', '').strip()
        subject = ''.join(res.xpath('//span[contains(text(),"主题分类 :")]//text()').extract()).strip()
        subject = subject.split(':')[-1].replace('-', '').strip()
        organ = ''.join(res.xpath('//span[contains(text(),"发布机构 :")]//text()').extract()).strip()
        organ = organ.split(':')[-1].replace('-', '').strip()
    else:
        # Layout variant 2: metadata in a "catalog" block with <font> labels.
        pub_no = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"发文字号")]/parent::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"索引号")]/parent::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"主题分类")]/parent::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"发布机构")]/parent::span[1]/text()').extract()).strip()
    # District organs are published without the city/district prefix.
    if organ.startswith('区'):
        organ = '上海市徐汇' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry the rawid so the failing record can be found from the log.
        raise Exception(f'xuhui etl: fulltext not found for rawid {callmodel.sql_model.rawid}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99231'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XUHUI"
    zt_provider = "xuhuigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the article body and the main-div table.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="main"]/table)')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   上海市长宁区
def policy_shcnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanghai Changning district (JSON list API).

    On the first page (page_index == 0) fans out insert rows for the
    remaining list pages; on every page queues one article task per entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['totalcount']
        # 10 entries per page.
        total_page = math.ceil(tcount / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Schedule pages 1..total_page-1 (page 0 is the current one).
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            infoid = li['infoid']
            rowguid = li['rowguid']
            url = f'http://zwgk.shcn.gov.cn:9091/article.html?infoid={infoid}&rowguid={rowguid}'
            temp["rawid"] = infoid
            temp["sub_db_id"] = '99232'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            article_json["infoid"] = li['infoid']
            article_json["rowguid"] = li['rowguid']
            # "zuo"/"you" store literal brace characters — presumably consumed
            # by a downstream request template; confirm before removing.
            article_json["zuo"] = '{'
            article_json["you"] = '}'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shcnlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanghai Changning district "open day" lists.

    Same flow as :func:`policy_shcnlist_callback` but the article URL uses
    the ``articleopenday.html`` endpoint (no ``rowguid`` parameter).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['totalcount']
        # 10 entries per page.
        total_page = math.ceil(tcount / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Schedule pages 1..total_page-1 (page 0 is the current one).
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            infoid = li['infoid']
            rowguid = li['rowguid']
            url = f'http://zwgk.shcn.gov.cn:9091/articleopenday.html?infoid={infoid}'
            temp["rawid"] = infoid
            temp["sub_db_id"] = '99232'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            article_json["infoid"] = li['infoid']
            article_json["rowguid"] = li['rowguid']
            # "zuo"/"you" store literal brace characters — presumably consumed
            # by a downstream request template; confirm before removing.
            article_json["zuo"] = '{'
            article_json["you"] = '}'
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shcnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changning district; no extra processing is needed."""
    return DealModel()


def policy_shcnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Changning district policy articles.

    The article detail comes back as JSON; metadata fields are read
    directly from the payload (missing values become empty strings),
    rows are queued for `policy_latest` / `policy_fulltext_latest`, and
    attachment info is written back onto the source row.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    detail = html_json['custom']['data']
    # Normalise every falsy payload value to ''.
    html = detail['infocontent'] or ''
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the payload title; fall back to the one captured at list stage.
    title = detail['title']
    if not title:
        title = article_json['title'].strip()
    pub_no = detail['documentnumber'] or ''
    index_no = detail['identifier'] or ''
    subject = detail['govthemename'] or ''
    written_date = detail['infodate'] or ''
    organ = detail['deptname'] or ''
    # District organs are published without the city/district prefix.
    if organ.startswith('区'):
        organ = '上海市长宁' + organ

    # The JSON payload already contains the rendered full text.
    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99232'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHCN"
    zt_provider = "shcngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: links found in the body plus the explicit "attach" list
    # (PDFs are routed through the site's pdf.js viewer).
    file_info1 = get_file_info(data, res, '(//body)')
    file_info2 = list()
    file_infos = detail.get('attach', "")
    if isinstance(file_infos, list):
        for attach in file_infos:
            purl = 'http://zwgk.shcn.gov.cn:9091/pdfjs/web/viewer.html?file=' + attach["attachurl"]
            dic = {'url': purl, 'name': attach["attachname"], 'pub_year': pub_year, 'keyid': lngid}
            file_info2.append(dic)
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   上海市静安区
def policy_jinganlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanghai Jing'an district (JSON list API).

    On the first page (page_index == 1) fans out insert rows for all list
    pages; on every page queues one article task per entry (keyed by
    ``infourl``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        # 10 entries per page.
        total_page = math.ceil(tcount / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Pages are 1-based here; insert-ignore deduplicates page 1.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            base_url = 'https://www.jingan.gov.cn'
            # Some entries already carry an absolute URL.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid is the filename stem, e.g. ".../abc123.html" -> "abc123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99233'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinganlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for a Jing'an district list variant (30 per page).

    Same flow as :func:`policy_jinganlist_callback` but the entry URL is in
    ``visiturl`` and paging starts at page_index 0.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        # 30 entries per page.
        total_page = math.ceil(tcount / 30)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Schedule pages 1..total_page-1 (page 0 is the current one).
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['visiturl']
            base_url = 'https://www.jingan.gov.cn'
            # Some entries already carry an absolute URL.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid is the filename stem, e.g. ".../abc123.html" -> "abc123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99233'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinganlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for a Jing'an district list variant (10 per page).

    Same flow as :func:`policy_jinganlist1_callback` with 10 entries per page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        # 10 entries per page.
        total_page = math.ceil(tcount / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Schedule pages 1..total_page-1 (page 0 is the current one).
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # NOTE(review): the sibling callbacks read ['custom']['data'] here;
        # 'infodate' looks suspicious — confirm against the live API response.
        li_list = html_json['custom']['infodate']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['visiturl']
            base_url = 'https://www.jingan.gov.cn'
            # Some entries already carry an absolute URL.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid is the filename stem, e.g. ".../abc123.html" -> "abc123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99233'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinganarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jing'an district; no extra processing is needed."""
    return DealModel()


def policy_jinganarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Jing'an district policy articles.

    Extracts metadata from the labelled spans of the article header,
    pulls the full text, queues rows for `policy_latest` /
    `policy_fulltext_latest`, and records attachment info back onto the
    source row.

    Raises:
        Exception: if the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)

    def _header_field(label):
        # Header metadata is rendered as "<label><value>" inside spans of
        # div.article-hd; strip the label prefix from the joined text.
        raw = ''.join(res.xpath(f'//div[@class="article-hd"]//span[contains(text(),"{label}")]//text()').extract()).strip()
        return raw.replace(label, '').strip()

    title = _header_field('标题:')
    if not title:
        # Fall back to the title captured at list stage.
        title = article_json['title'].strip()
    pub_no = _header_field('文号:')
    index_no = _header_field('索取号:')
    subject = _header_field('主题分类:')
    written_date = _header_field('成文日期:')
    organ = _header_field('发布机构:')
    # District organs are published without the city/district prefix.
    if organ.startswith('区'):
        organ = '上海市静安' + organ

    fulltext_xpath = '//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry the rawid so the failing record can be found from the log.
        raise Exception(f'jingan etl: fulltext not found for rawid {callmodel.sql_model.rawid}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99233'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINGAN"
    zt_provider = "jingangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the article body.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市普陀区
def policy_shptlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Shanghai Putuo district policy lists (HTML).

    Handles two list flavours keyed off ``list_rawid``: the site search
    ("search/index", 25 entries per page) and plain static index pages.
    On the first page schedules the remaining pages; on every page queues
    one article task per ``<li>`` entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if 'search/index' in callmodel.sql_model.list_rawid:
            # Total count comes from the "按体裁分类（N）" tab label; 25 per page.
            max_count = re.findall(r"按体裁分类（(\d+)）", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            max_count = math.ceil(max_count / 25)
        else:
            # Static pages expose "记录 x/total" directly as a page count.
            max_count = re.findall(r"记录 .*?/(\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Each flavour has its own paging scheme and host prefix.
                if 'search/index' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"page={page}", "url_part": "hudong"}
                else:
                    dic = {"page_info": f"index_{page}.html", "url_part": "www"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"uli14")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.shpt.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # Skip non-article links (only .htm(l) detail pages are wanted).
            if 'htm' not in url:
                continue
            # rawid is the filename stem, e.g. ".../abc123.html" -> "abc123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99234'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|div/span[2]/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shptarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Putuo district; no extra processing is needed."""
    return DealModel()


def policy_shptarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shanghai Putuo district policy articles (sub_db_id 99234).

    Parses the article page HTML, extracts metadata (title, pub_no,
    index_no, subject, written_date, organ) and the full text, queues rows
    for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back onto the source row via ``other_dicts``.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"发文字号")]/following::span[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"主题分类")]/following::span[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"成文日期")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"公开主体")]/following::span[1]//text()').extract()).strip()
    if not organ:
        # Known site sections mapped to their issuing organ, used when the
        # page itself does not state one.
        url_dict = {
            "www.shpt.gov.cn/sww/": "上海市普陀区商务局",
            "www.shpt.gov.cn/jyj/": "上海市普陀区普陀教育局",
            "www.shpt.gov.cn/kw/": "上海市普陀区普陀科技局",
            "gaj.sh.gov.cn/ptga/index.html": "上海市普陀区普陀公安局",
            "www.shpt.gov.cn/sifaju/": "上海市普陀区普陀司法局",
            "www.shpt.gov.cn/rensheju/": "上海市普陀区普陀人社局",
            "www.shpt.gov.cn/guituju/": "上海市普陀区普陀规划资源局",
            "www.shpt.gov.cn/hbj/": "上海市普陀区普陀生态环境局",
            "www.shpt.gov.cn/wenhuaju/": "上海市普陀区普陀文旅局",
            "www.shpt.gov.cn/weijiwei/": "上海市普陀区普陀卫生健康局",
            "www.shpt.gov.cn/scjgj/": "上海市普陀区普陀市场监管局",
            "www.shpt.gov.cn/gzw/": "上海市普陀区普陀国资局",
            "www.shpt.gov.cn/tyj/": "上海市普陀区普陀体育局",
            "www.shpt.gov.cn/lrj/": "上海市普陀区普陀绿容局",
            "www.shpt.gov.cn/fgj/": "上海市普陀区普陀房管局",
        }
        for k, v in url_dict.items():
            if k in provider_url:
                organ = v
                break
    if not organ:
        # BUGFIX: re.findall with a capturing group returned only the group
        # text ('局'/'委员会'), never the organ name.  Search the whole
        # pattern with a non-capturing group instead.
        organ_match = re.search(r'普陀区.{1,10}?(?:局|委员会)', title)
        organ = organ_match.group(0) if organ_match else ''
    if organ.startswith('普陀区'):
        organ = '上海市' + organ
    if organ.startswith('区'):
        organ = '上海市普陀' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99234'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHPT"
    zt_provider = "shptgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live both in the body and in a trailing table.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="main"]/table)')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市杨浦区
def policy_shyplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shanghai Yangpu district (sub_db_id 99236).

    On page 1, derives the total page count from the embedded
    ``control_page`` marker (15 items per page) and inserts follow-up list
    tasks; on every page, extracts article links into next-stage tasks
    carrying url/title/pub_date in ``article_json``.

    Raises:
        Exception: when a detail link does not carry an ``id=`` rawid,
            which indicates a site-layout change.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r"control_page count='(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate the stored JSON; the value is unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//ul[contains(@class,"uli14")]/li|//ul[contains(@class,"uli16")]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.shyp.gov.cn/{callmodel.sql_model.list_rawid}pageNo=1'
            url = parse.urljoin(base_url, href)
            if 'id=' not in url:
                # Every detail url must carry an "id=" rawid.
                raise Exception(f'unexpected article url (no id=): {url}')
            rawid = url.split('id=')[-1].strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99236'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[@class="time"]/text()|div/span[2]/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shyparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op for Yangpu: parsing is done in the ETL callback."""
    return DealModel()


def policy_shyparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shanghai Yangpu district policy articles (sub_db_id 99236).

    Parses the article page HTML, extracts metadata and the full text,
    queues rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    writes attachment info back via ``other_dicts``.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if '\n' in title:
        # Multi-line on-page titles are unreliable; prefer the meta tag.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"发文字号")]/following::span[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"信息分类")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"article-info")]//span[contains(text(),"发布机构")]/following::span[1]//text()').extract()).strip()
    if organ.startswith('区'):
        organ = '上海市杨浦' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99236'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHYP"
    zt_provider = "shypgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live both in the body and in the attachment list.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//ul[contains(@class,"nowrapli")])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市闵行区
def policy_shmhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shanghai Minhang district (sub_db_id 99237).

    On page 0, fetches the section's ``config.js`` to read the total page
    count and inserts follow-up list tasks (pages are numbered backwards
    as ``list_{total-page}``); on every page, extracts article links into
    next-stage tasks carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            page_url = f'https://zwgk.shmh.gov.cn/{callmodel.sql_model.list_rawid}/config.js'
            # Timeout added: an unbounded requests.get can hang the worker.
            response = requests.get(page_url, timeout=30)
            max_count = re.findall(r"PageCount= (\d+)", response.text)
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count

            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate the stored JSON; the value is unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"list_{total_page - page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//div[@class="news_list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://zwgk.shmh.gov.cn/{callmodel.sql_model.list_rawid}/list_0.htm'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (e.g. javascript anchors).
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99237'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shmharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op for Minhang: parsing is done in the ETL callback."""
    return DealModel()


def policy_shmharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shanghai Minhang district policy articles (sub_db_id 99237).

    Parses the article page HTML, extracts metadata from the catalog block
    and the full text, queues rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and writes attachment info back via
    ``other_dicts``.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"发文字号")]/ancestor::div[2]/div[2]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"主题分类")]/ancestor::div[2]/div[2]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"成文日期")]/ancestor::div[2]/div[2]//text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"失效日期")]/ancestor::div[2]/div[2]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="catalog"]//font[contains(text(),"发文机关")]/ancestor::div[2]/div[2]//text()').extract()).strip()
    if organ.startswith('区'):
        organ = '上海市闵行' + organ

    fulltext_xpath = '//div[@id="div_content_main"]|//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99237'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHMH"
    zt_provider = "shmhgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live both in the body and in the attachment block.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="xx_attach"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市宝山区
def policy_shbsqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shanghai Baoshan district (sub_db_id 99238).

    The district serves two list layouts: the ``shbs/gsgg`` public-notice
    section (``uli14`` list on www.shbsq.gov.cn) and the open-information
    directory (``infolist`` table on xxgk.shbsq.gov.cn).  On page 1 the
    total page count is derived either from the "记录 x/y" footer or the
    ``total : N`` counter (20 items per page) and follow-up list tasks are
    inserted; every page yields next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"记录 .*?/(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            # Directory layout exposes a raw record total; 20 per page.
            max_count = re.findall(r"total : (\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = math.ceil(int(max_count[0]) / 20)
        else:
            max_count = int(max_count[0])
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate the stored JSON; the value is unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'infoDirectory' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"pageIndex={page}", "url_part": "xxgk"}
                else:
                    dic = {"page_info": f"index_{page}.html", "url_part": "www"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'shbs/gsgg' == list_rawid:
            li_list = res.xpath('//ul[contains(@class,"uli14")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.shbsq.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    # Skip non-article links.
                    continue
                # rawid is the article file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99238'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Directory layout: first table row is the header.
            li_list = res.xpath('//tbody[@id="infolist"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = f'http://xxgk.shbsq.gov.cn'
                url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = url.split('infoid=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99238'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/div/text()').extract_first().strip()
                if not li.xpath('td[3]/text()').extract_first():
                    # Rows without a publish date are skipped entirely.
                    continue
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shbsqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op for Baoshan: parsing is done in the ETL callback."""
    return DealModel()


def policy_shbsqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shanghai Baoshan district policy articles (sub_db_id 99238).

    Parses the article page HTML, extracts metadata and the full text,
    queues rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    writes attachment info back via ``other_dicts``.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//h3[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()
    if not pub_date:
        # Notice-board items carry no list-page date; read it off the page.
        pub_date = ''.join(res.xpath('//span[@id="ivs_date"]/text()').extract()).replace('发布时间：', '').strip()
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[@class="tit"]//label[contains(text(),"发文字号")]/following::p[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="tit"]//label[contains(text(),"主题分类")]/following::p[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="tit"]//label[contains(text(),"发文日期")]/following::p[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="tit"]//label[contains(text(),"机构名称")]/following::p[1]//text()').extract()).strip()
    if organ.startswith('区'):
        organ = '上海市宝山' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context instead of a bare Exception.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99238'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHBSQ"
    zt_provider = "shbsqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace kept from the original pipeline

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments are only collected from the article body here.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市嘉定区
def policy_jiadinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shanghai Jiading district (sub_db_id 99239).

    On page 1, reads the total page count from the "/共N页" pager text and
    inserts follow-up list tasks; on every page, extracts article rows
    from the result table into next-stage tasks carrying url/title/
    pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r"/共(\d+)页<", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate the stored JSON; the value is unused.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # First table row is the header.
        li_list = res.xpath('//table[@class="tab1"]//tr')[1:]
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = f'http://www.jiading.gov.cn/{callmodel.sql_model.list_rawid}'
            url = parse.urljoin(base_url, href)
            # rawid is the last path component of the detail url.
            rawid = url.split('/')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99239'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiadingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage no-op for Jiading: parsing is done in the ETL callback."""
    return DealModel()


def policy_jiadingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Jiading (上海市嘉定区) policy article page.

    Extracts title, dates, document numbers and the fulltext block from the
    crawled HTML, builds rows for the ``policy_latest`` and
    ``policy_fulltext_latest`` tables, and queues an update that writes the
    attachment info back onto the source task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the metadata
        captured at list stage.
    :return: EtlDealModel carrying ``save_data`` plus a ``befor_dicts`` update.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # List-stage metadata; used as fallback when the page itself lacks it.
    title = article_json['title']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one if empty.
    title = ''.join(res.xpath('//div[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Two fallback locations for the publication date, tried in order.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="data-more"]//td[contains(text(),"发文日期:")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    # Metadata table cells: document number, index number, subject, issuing organ.
    pub_no = ''.join(res.xpath('//div[@class="data-more"]//td[contains(text(),"文 号:")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="data-more"]//td[contains(text(),"索 引 号:")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="data-more"]//td[contains(text(),"主题分类:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="data-more"]//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Site abbreviates district-level organs; prefix the full locality name.
        organ = '上海市嘉定' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL when the fulltext container is missing
        # (presumably retried/flagged upstream — TODO confirm).
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99239'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIADING"
    zt_provider = "jiadinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info harvested from the fulltext area is stored back on the
    # source task row as JSON ("{}" when nothing was found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市金山区
def policy_jinshanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Jinshan (上海市金山区) policy documents.

    On page 1, reads the total page count from the list HTML and queues one
    list task for each remaining page; on every page, extracts the article
    links (two page layouts, selected by ``list_rawid``) and queues them as
    article-stage tasks.

    Fixes vs. previous revision: regex patterns are raw strings (``\\d``/``\\.``
    in non-raw literals raise SyntaxWarning on Python 3.12+), the unused
    ``list_json`` local was dropped, and constant f-strings became plain
    strings. Behavior is unchanged.

    :param callmodel: list-stage callback model with the crawled page HTML.
    :return: DealModel with queued follow-up list tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is printed inline on the page, e.g. "记录 1/12" -> 12.
        max_count = re.findall(r"记录 .*?/(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed follow-up list tasks for pages 2..total_page exactly once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        base_url = 'https://www.jinshan.gov.cn'
        if list_rawid == 'qzf-gfxwj':
            # Regulatory-document channel: table layout, skip the header row.
            li_list = res.xpath('//table[@id="Datatable-1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the article filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99241'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Other channels: <ul class="...uli14..."> list layout.
            li_list = res.xpath('//ul[contains(@class,"uli14")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the article filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99241'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jinshan; nothing to dispatch, so an empty
    DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_jinshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Jinshan (上海市金山区) policy article page.

    Extracts metadata (title, document/index numbers, subject, written date,
    issuing organ) and the fulltext from the crawled HTML, builds rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and queues an update that
    writes attachment info back onto the source task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the metadata
        captured at list stage.
    :return: EtlDealModel carrying ``save_data`` plus a ``befor_dicts`` update.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # List-stage metadata; used as fallback when the page itself lacks it.
    title = article_json['title']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one if empty.
    title = ''.join(res.xpath('//h2[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata grid cells: document number, index number, subject, dates, organ.
    pub_no = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"发文字号")]/following::p[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"索引号")]/following::p[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"主题分类")]/following::p[1]//text()').extract()).strip()
    # subject_word = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"主题词")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"成文日期")]/following::p[1]//text()').extract()).strip()
    # impl_date = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"实施日期")]/following::td[1]/text()').extract()).strip()
    # invalid_date = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"失效日期")]/following::span[1]/text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"公开主体")]/following::p[1]//text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(
    #         res.xpath('//div[@class="Article_ly"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    if organ.startswith('区'):
        # Site abbreviates district-level organs; prefix the full locality name.
        organ = '上海市金山' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL when the fulltext container is missing
        # (presumably retried/flagged upstream — TODO confirm).
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99241'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINSHAN"
    zt_provider = "jinshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can sit in the fulltext area or a separate column div;
    # concatenated info is stored back on the task row ("{}" when none found).
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="col-md-12"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   上海市松江区
def policy_songjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Songjiang (上海市松江区) notice listings.

    Reads the record count embedded in the page's JS pager call, derives the
    page count (16 items per page), queues the list pages on page 1, and
    queues one article-stage task per listed item.

    Fixes vs. previous revision: regex patterns are raw strings (``\\d``/``\\.``
    in non-raw literals raise SyntaxWarning on Python 3.12+), the unused
    ``list_json`` local was dropped, and the constant f-string became a plain
    string. Behavior is unchanged.

    :param callmodel: list-stage callback model with the crawled page HTML.
    :return: DealModel with queued follow-up list tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Record count appears in a JS call: "newslistTZGG.html",x,y,<count>
        max_count = re.findall(r'"newslistTZGG.html",\d+,\d+,(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 16)  # 16 items per list page

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # NOTE(review): seeds from page 1 again (insert-ignore makes the
            # duplicate harmless) — confirm that is intentional.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://www.songjiang.gov.cn'
        li_list = res.xpath('//ul[@class="news-list-items"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href').extract_first()
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99242'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_songjianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for the Songjiang JSON search API (20 items/page).

    Parses the JSON response, queues the remaining list pages when this is
    page 0 (pages appear to be 0-based here, hence ``range(1, total_page)``),
    and queues one article-stage task per returned record.

    Fixes vs. previous revision: the rawid regex is a raw string (``\\.`` in a
    non-raw literal raises SyntaxWarning on Python 3.12+) and the unused
    ``list_json`` local plus a duplicated commented-out line were dropped.
    Behavior is unchanged.

    :param callmodel: list-stage callback model with the crawled JSON body.
    :return: DealModel with queued follow-up list tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        total_page = math.ceil(tcount / 20)  # 20 records per API page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The original list_json is reused; the page number travels in
                # the page_index column.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        base_url = 'https://www.songjiang.gov.cn'
        for li in html_json['custom']['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['visiturl']
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99242'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_songjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Songjiang; no dispatch work is needed, so
    an empty DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_songjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Songjiang (上海市松江区) policy article page.

    Extracts metadata and the fulltext (two page layouts are supported via
    alternative xpaths), builds rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and queues an update writing attachment info
    back onto the source task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the metadata
        captured at list stage.
    :return: EtlDealModel carrying ``save_data`` plus a ``befor_dicts`` update.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # List-stage metadata; used as fallback when the page itself lacks it.
    title = article_json['title']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Two possible title containers depending on the page template.
    title = ''.join(res.xpath('//div[@class="ewb-article-tt"]/text()|//h1[@class="doc-tt"]/text()').extract()).strip()
    title = title.replace('标题:', '').strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//ul[contains(@class,"doc-detail-list")]/li[contains(text(),"发文字号")]/span/text()').extract()).strip()
    # The site uses "----" as an empty-value placeholder for the doc number.
    if '----' == pub_no:
        pub_no = ''
    index_no = ''.join(res.xpath('//ul[contains(@class,"doc-detail-list")]/li[contains(text(),"索")]/span/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[contains(@class,"doc-detail-list")]/li[contains(text(),"主题分类")]/span/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[contains(@class,"doc-detail-list")]/li[contains(text(),"成文日期")]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[contains(@class,"doc-detail-list")]/li[contains(text(),"公开主体")]/span[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Site abbreviates district-level organs; prefix the full locality name.
        organ = '上海市松江' + organ

    # Fulltext also varies with the template; try both containers.
    fulltext_xpath = '//div[@id="detail-text"]|//div[@class="ewb-article-detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL when the fulltext container is missing
        # (presumably retried/flagged upstream — TODO confirm).
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99242'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SONGJIANG"
    zt_provider = "songjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can sit in the fulltext area or in the dedicated list;
    # concatenated info is stored back on the task row ("{}" when none found).
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//ul[@id="fujian"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  上海市青浦区
def policy_shqplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Qingpu (上海市青浦区) policy documents.

    Reads ``totalPage`` from the list HTML, queues the remaining list pages
    on page 1, and extracts the article links (two layouts, selected by
    ``list_rawid``) into article-stage tasks.

    Fixes vs. previous revision: regex patterns are raw strings (``\\d``/``\\.``
    in non-raw literals raise SyntaxWarning on Python 3.12+), the unused
    ``list_json`` local was dropped, and constant f-strings became plain
    strings. Behavior is unchanged.

    :param callmodel: list-stage callback model with the crawled page HTML.
    :return: DealModel with queued follow-up list tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager exposes the page count directly as "totalPage: N".
        max_count = re.findall(r"totalPage: (\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed follow-up list tasks for pages 2..total_page exactly once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        base_url = 'https://www.shqp.gov.cn'
        if list_rawid == 'shqp/zwgk/gfxwj/wj':
            # Regulatory-document channel: table layout.
            li_list = res.xpath('//table[@id="Datatable-1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the article filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99243'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Other channels: <ul class="...news-green..."> list layout; the
            # date is split across two spans (day + year-month).
            li_list = res.xpath('//ul[contains(@class,"news-green")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the article filename without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99243'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/@title').extract_first().strip()
                pub_date1 = li.xpath('a/div[@class="data-time"]/span[1]/text()').extract_first().strip()
                pub_date2 = li.xpath('a/div[@class="data-time"]/span[2]/text()').extract_first().strip()
                article_json["pub_date"] = pub_date1 + pub_date2
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shqparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Qingpu; no dispatch work is needed, so an
    empty DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_shqparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for one Qingpu (上海市青浦区) policy article page.

    Extracts title, document/index numbers and the issuing organ plus the
    fulltext from the crawled HTML, builds rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and queues an update that writes attachment
    info back onto the source task row.

    :param callmodel: callback model; ``para_dicts['data']['1_1']['html']``
        holds the article HTML and ``sql_model.article_json`` the metadata
        captured at list stage.
    :return: EtlDealModel carrying ``save_data`` plus a ``befor_dicts`` update.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # List-stage metadata; used as fallback when the page itself lacks it.
    title = article_json['title']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-stage one if empty.
    title = ''.join(res.xpath('//h1[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="news-nav-content"]//label[contains(text(),"发文字号")]/following::span[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="news-nav-content"]//label[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip()
    # subject = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"主题分类")]/following::p[1]//text()').extract()).strip()
    # subject_word = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"主题词")]/following::span[1]/text()').extract()).strip()
    # written_date = ''.join(res.xpath('//div[@class="model-tabel row"]//label[contains(text(),"成文日期")]/following::p[1]//text()').extract()).strip()
    # impl_date = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"实施日期")]/following::td[1]/text()').extract()).strip()
    # invalid_date = ''.join(res.xpath('//div[@class="article-info"]//span[contains(text(),"失效日期")]/following::span[1]/text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//table[contains(@class,"table-bordered")]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="news-nav-content"]//label[contains(text(),"公开主体")]/following::span[1]//text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(
    #         res.xpath('//div[@class="Article_ly"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    if organ.startswith('区'):
        # Site abbreviates district-level organs; prefix the full locality name.
        organ = '上海市青浦' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL when the fulltext container is missing
        # (presumably retried/flagged upstream — TODO confirm).
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99243'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHQP"
    zt_provider = "shqp"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated lngid

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment info harvested from the fulltext area is stored back on the
    # source task row as JSON ("{}" when nothing was found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   上海市奉贤区
def policy_fengxianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Fengxian (上海市奉贤区) policy documents.

    Reads ``totalPage`` from the list HTML, queues the remaining list pages
    on page 1, and queues one article-stage task per listed item.

    Fixes vs. previous revision: regex patterns are raw strings (``\\d``/``\\.``
    in non-raw literals raise SyntaxWarning on Python 3.12+), the unused
    ``list_json``/``list_rawid`` locals were dropped, and constant f-strings
    became plain strings. Behavior is unchanged.

    :param callmodel: list-stage callback model with the crawled page HTML.
    :return: DealModel with queued follow-up list tasks and article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager exposes the page count directly as "totalPage: N".
        max_count = re.findall(r'totalPage: (\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed follow-up list tasks for pages 2..total_page exactly once.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"index_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://www.fengxian.gov.cn'
        li_list = res.xpath('//div[@class="col-md-20"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[@class="title"]/a/@href').extract_first()
            url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99244'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[@class="title"]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[@class="date"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fengxianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Fengxian xxgk JSON list response (sub_db_id 99244).

    On the first page, fan out one list task per page; for every list entry,
    queue a next-stage article task with url/title/pub_date/uuid metadata.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    payload = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = math.ceil(payload['count'] / 20)  # 20 entries per page
    if int(callmodel.sql_model.page_index) == 1:
        # Seed page: insert one list task per page so the rest get crawled.
        page_tasks = DealInsertModel()
        page_tasks.insert_pre = CoreSqlValue.insert_ig_it
        base_row = deal_sql_dict(callmodel.sql_model.dict())
        json.loads(callmodel.sql_model.list_json)  # sanity-check stored JSON
        for page_no in range(1, total_page + 1):
            base_row["page"] = total_page
            base_row["page_index"] = page_no
            base_row["list_json"] = callmodel.sql_model.list_json
            page_tasks.lists.append(base_row.copy())
        result.befor_dicts.insert.append(page_tasks)

    article_tasks = DealInsertModel()
    article_tasks.insert_pre = CoreSqlValue.insert_ig_it
    for entry in payload['data']:
        row = info_dicts.copy()
        # Article tasks run under the next stage's tag.
        row["task_tag"] = row.pop("task_tag_next")
        # pubDate is epoch milliseconds.
        pub_stamp = datetime.datetime.fromtimestamp(entry['pubDate'] / 1000)
        meta = {
            "url": f'https://xxgk.fengxian.gov.cn{entry["url"]}',
            "title": entry['name'],
            "pub_date": pub_stamp.strftime("%Y-%m-%d %H:%M:%S"),
            "uuid": entry['id'],
        }
        row["rawid"] = entry['id']
        row["sub_db_id"] = '99244'
        row["article_json"] = json.dumps(meta, ensure_ascii=False)
        article_tasks.lists.append(row)
    result.next_dicts.insert.append(article_tasks)

    return result


def policy_fengxianlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for a second Fengxian xxgk JSON endpoint (sub_db_id 99244).

    On the first page, fans out one list task per page; each list entry
    becomes a next-stage article task whose URL is built from the stored
    ``list_rawid`` and the entry id.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['count']
        # 10 entries per page on this endpoint.
        total_page = math.ceil(tcount / 10)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Seed page only: queue one list task per page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed but unused; serves as a validity check of the stored JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": page*10}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = html_json['data']
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            uuid = li['id']
            # url = parse.urljoin(base_url, href)
            url = f'https://xxgk.fengxian.gov.cn/art/info/{callmodel.sql_model.list_rawid}/{uuid}'
            rawid = uuid
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99244'
            article_json["url"] = url
            article_json["title"] = li['name']
            article_json["pub_date"] = li['showTime']
            article_json["uuid"] = li['id']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fengxianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: all processing happens in the ETL stage."""
    return DealModel()


def policy_fengxianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fengxian district policy article pages (sub_db_id 99244).

    Extracts the title and full text from the fetched HTML, assembles rows
    for the `policy_latest` and `policy_fulltext_latest` tables, and queues
    an update writing attachment info into the task row's `other_dicts`.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]  # pub_date string starts with "YYYY"
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title from the list stage.
    title = ''.join(res.xpath('//div[@id="title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried / diagnosed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99244'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FENGXIAN"
    zt_provider = "fengxiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the full-text block; "{}" marks "no attachments".
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_fengxianarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: all processing happens in the ETL stage."""
    return DealModel()


def policy_fengxianarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fengxian xxgk JSON article payloads (sub_db_id 99244).

    The article response is JSON, not HTML: metadata lives in ``data``
    fields and the body HTML in ``data['body']``.  Builds rows for
    `policy_latest` / `policy_fulltext_latest` and queues attachment info
    (inline links plus ``attachList``) into the task row's `other_dicts`.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    html = html_json['data']['body'] or ''
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Normalise every metadata field to a string ('' for null/empty values).
    text_data = {key: (value if value else '') for key, value in html_json['data'].items()}
    title = html_json['data']['name']
    if not title:
        title = article_json['title'].strip()
    # Document number and index number are each split across three fields.
    # (Original code summed sy1 three times — a copy-paste bug; use sy1..sy3,
    # with .get so absent fields do not raise.)
    pub_no = text_data.get('wh1', '') + text_data.get('wh2', '') + text_data.get('wh3', '')
    index_no = text_data.get('sy1', '') + text_data.get('sy2', '') + text_data.get('sy3', '')
    subject = text_data['subject']
    # NOTE(review): 'offlineTime' feeding written_date looks suspicious —
    # confirm against the source API which field carries the written date.
    written_date = text_data['offlineTime']
    impl_date = text_data['effectiveTime']
    invalid_date = text_data['invalidTime']
    organ = text_data['dept']
    if organ.startswith('区'):
        organ = '上海市奉贤' + organ
    # organ is computed but, as in the original, deliberately not written
    # into data['organ'].

    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99244'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FENGXIAN"
    zt_provider = "fengxiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: links embedded in the body plus the explicit attachList.
    file_info1 = get_file_info(data, res, f'(//body)')
    file_info2 = [
        {'url': item["location"], 'name': item["original"], 'pub_year': pub_year, 'keyid': lngid}
        for item in (html_json['data']['attachList'] or [])
    ]
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Chongming District, Shanghai (上海市崇明区)
def policy_shcmlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Chongming (Shanghai) policy listings (sub_db_id 99245).

    On the seed page (page_index == 0) fans out one list task per remaining
    page; every listed article becomes a next-stage article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['custom']['total']
        # 15 entries per page on this site.
        total_page = math.ceil(tcount / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # NOTE(review): pages appear 0-indexed here (seed page 0, then
            # pages 1..total_page-1 below) — confirm the last page is not
            # being dropped.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed but unused; serves as a validity check of the stored JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": list_json['page_info'].replace(':0', f':{page}')}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['custom']['infolist']
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['infourl']
            base_url = 'https://www.shcm.gov.cn'
            # url = parse.urljoin(base_url, href)
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            # Only *.htm(l) article pages are crawlable; skip anything else.
            if 'htm' not in url:
                continue
            # rawid = filename without extension, e.g. ".../t123.html" -> "t123".
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99245'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['infodatestr']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_shcmarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: all processing happens in the ETL stage."""
    return DealModel()


def policy_shcmarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Chongming (Shanghai) policy article pages (sub_db_id 99245).

    Extracts the title, the labelled metadata table and the full text from
    the fetched HTML, assembles `policy_latest` / `policy_fulltext_latest`
    rows, and queues attachment info into the task row's `other_dicts`.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title from the list stage.
    title = ''.join(res.xpath('//div[@id="ivs_title"]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Metadata table: the value <p> immediately follows the label <p>.
        xp = f'//div[@class="general-top"]//p[contains(text(),"{label}")]/following::p[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta("发文字号")
    index_no = _meta("索引号")
    subject = _meta("主题分类")
    written_date = _meta("成文日期")
    impl_date = _meta("生效日期")
    invalid_date = _meta("失效日期")
    organ = _meta("公开主体")
    # The site abbreviates the issuing organ to "区…"; prefix the full region.
    if organ.startswith('区'):
        organ = '上海市崇明' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried / diagnosed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99245'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHCM"
    zt_provider = "shcmgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: links inside the body plus the "enclosure" box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="enclosure"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Shandong Provincial Development and Reform Commission (山东省发展和改革委员会)
def policy_fgwshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Shandong DRC XML-style listings (sub_db_id 99246).

    The response embeds ``<totalrecord>N</totalrecord>``.  List tasks are
    fanned out in batches of 3 pages (25 records each) via start/end record
    ranges, and every ``<record>`` entry becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        # 25 records per page.
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed but unused; serves as a validity check of the stored JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3 pages per task: each task requests records
            # [start, end], i.e. up to 3 * 25 records, clamped to max_count.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//a/@href').extract_first()
            base_url = f'http://fgw.shandong.gov.cn'
            # url = parse.urljoin(base_url, href)
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # Only *.htm(l) article pages are crawlable; skip anything else.
            if 'htm' not in url:
                continue
            # rawid = filename without extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99246'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Shandong DRC zfxxgk list page (sub_db_id 99246).

    The page banner states the total page count; on the first page one list
    task per page is fanned out, and each list item becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    matches = re.findall('共&nbsp;(\d+)&nbsp;页', page_html)
    total_page = int(matches[0]) if matches else 1
    if int(callmodel.sql_model.page_index) == 1:
        # Seed page: insert one list task per page so the rest get crawled.
        page_tasks = DealInsertModel()
        page_tasks.insert_pre = CoreSqlValue.insert_ig_it
        base_row = deal_sql_dict(callmodel.sql_model.dict())
        json.loads(callmodel.sql_model.list_json)  # sanity-check stored JSON
        for page_no in range(1, total_page + 1):
            base_row["page"] = total_page
            base_row["page_index"] = page_no
            base_row["list_json"] = callmodel.sql_model.list_json
            page_tasks.lists.append(base_row.copy())
        result.befor_dicts.insert.append(page_tasks)

    article_tasks = DealInsertModel()
    article_tasks.insert_pre = CoreSqlValue.insert_ig_it
    base_url = 'http://fgw.shandong.gov.cn'
    for node in Selector(text=page_html).xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
        href = node.xpath('a/@href').extract_first()
        url = href if 'http' in href else base_url + href
        # Only *.htm(l) article pages are crawlable; skip anything else.
        if 'htm' not in url:
            continue
        row = info_dicts.copy()
        # Article tasks run under the next stage's tag.
        row["task_tag"] = row.pop("task_tag_next")
        # rawid = filename without extension.
        row["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        row["sub_db_id"] = '99246'
        meta = {
            "url": url,
            "title": node.xpath('a/span/text()|a/text()').extract_first().strip(),
            "pub_date": node.xpath('b/text()').extract_first().strip(),
        }
        row["article_json"] = json.dumps(meta, ensure_ascii=False)
        article_tasks.lists.append(row)

    result.next_dicts.insert.append(article_tasks)

    return result


def policy_fgwshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback: all processing happens in the ETL stage."""
    return DealModel()


def policy_fgwshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Shandong DRC policy article pages (sub_db_id 99246).

    Extracts the title, labelled metadata and full text from the fetched
    HTML, assembles `policy_latest` / `policy_fulltext_latest` rows, and
    queues attachment info into the task row's `other_dicts`.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title priority: on-page heading; the ArticleTitle meta tag when the
    # heading wraps across lines; finally the list-stage title.
    title = ''.join(res.xpath('//div[@class="htitle"]//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Metadata table: the value <td> immediately follows the label <td>.
        xp = f'//td[contains(text(),"{label}")]/following::td[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    written_date = _meta("制发日期:")
    legal_status = _meta("有效性:")
    organ = _meta("发布机构:")
    # The site abbreviates the organ to "省…"; prefix the province name.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="art_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried / diagnosed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99246'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWSHANDONG"
    zt_provider = "fgwshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the full-text block; "{}" marks "no attachments".
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Shandong Provincial Department of Industry and Information Technology (山东省工业和信息化厅)
def policy_gxtshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for Shandong MIIT XML-style listings (sub_db_id 99247).

    Mirrors ``policy_fgwshandonglist_callback``: the response embeds
    ``<totalrecord>N</totalrecord>``, list tasks are fanned out in batches of
    3 pages (25 records each) via start/end record ranges, and every
    ``<record>`` entry becomes an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall('<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        # 25 records per page.
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed but unused; serves as a validity check of the stored JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3 pages per task: each task requests records
            # [start, end], i.e. up to 3 * 25 records, clamped to max_count.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            # Article tasks run under the next stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('.//a/@href').extract_first()
            base_url = f'http://gxt.shandong.gov.cn'
            # url = parse.urljoin(base_url, href)
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # Only *.htm(l) article pages are crawlable; skip anything else.
            if 'htm' not in url:
                continue
            # rawid = filename without extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99247'
            article_json["url"] = url
            article_json["title"] = li.xpath('.//a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('.//span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for gxt.shandong.gov.cn (plain HTML pager list).

    Reads the total page count from the "1_1" HTML payload; on page 1 it
    fans out one list task per page, then turns every article link on the
    current page into a next-stage insert task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning; default to a
        # single page when the pager text is missing.
        found = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        total_page = int(found[0]) if found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://gxt.shandong.gov.cn'
        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99247'
            article_json = {
                "url": url,
                "title": li.xpath('a/span/text()|a/text()').extract_first().strip(),
                "pub_date": li.xpath('b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gxt.shandong.gov.cn; nothing to schedule."""
    return DealModel()


def policy_gxtshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxt.shandong.gov.cn article pages.

    Extracts title/metadata/full text from the fetched HTML, builds the
    policy_latest / policy_fulltext_latest rows, and writes attachment
    info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="sp_time"]/preceding::div[1]//text()|//div[@class="headline"]/h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the province: "省X厅" -> "山东省X厅".
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@class="news-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99247'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTSHANDONG"
    zt_provider = "gxtshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (or an empty dict) on the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省科学技术厅 (Shandong Provincial Department of Science and Technology)
def policy_kjtshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for kjt.shandong.gov.cn (XML <record> feed).

    Reads the total record count; on page 1 it fans out one list task per
    3-page window (25 records/page -> 75-record start/end window), then
    turns every <record> on the current page into a next-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        found = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(found[0]) if found else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # Clamp the window end to the real record count.
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://kjt.shandong.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('.//a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99248'
            article_json = {
                "url": url,
                "title": li.xpath('.//a/text()').extract_first().strip(),
                "pub_date": li.xpath('.//span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for kjt.shandong.gov.cn (plain HTML pager list).

    Reads the total page count from the "1_1" HTML payload; on page 1 it
    fans out one list task per page, then turns every article link on the
    current page into a next-stage insert task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning; default to a
        # single page when the pager text is missing.
        found = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        total_page = int(found[0]) if found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://kjt.shandong.gov.cn'
        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99248'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for kjt.shandong.gov.cn; nothing to schedule."""
    return DealModel()


def policy_kjtshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for kjt.shandong.gov.cn article pages.

    Extracts title/metadata/full text from the fetched HTML, builds the
    policy_latest / policy_fulltext_latest rows, and writes attachment
    info back onto the source task row.

    Raises:
        Exception: when no publish date or no full-text container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="detail-title"]/h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Fall back to the page's update time when the list date is missing or bogus.
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//span[@class="updateTime"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_no = ''.join(res.xpath('//table//td[contains(text(),"发文字号:")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table//td[contains(text(),"组配分类:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the province: "省X厅" -> "山东省X厅".
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99248'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTSHANDONG"
    zt_provider = "kjtshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date  # already normalized via clean_pubdate above
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (or an empty dict) on the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省教育厅 (Shandong Provincial Department of Education)
def policy_edushandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for edu.shandong.gov.cn (XML <record> feed).

    Reads the total record count; on page 1 it fans out one list task per
    3-page window (25 records/page -> 75-record start/end window), then
    turns every <record> on the current page into a next-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        found = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(found[0]) if found else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # Clamp the window end to the real record count.
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://edu.shandong.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('.//a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99249'
            article_json = {
                "url": url,
                "title": li.xpath('.//a/text()').extract_first().strip(),
                "pub_date": li.xpath('.//span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_edushandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for edu.shandong.gov.cn (plain HTML pager list).

    Reads the total page count from the "1_1" HTML payload; on page 1 it
    fans out one list task per page, then turns every article link on the
    current page into a next-stage insert task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning; default to a
        # single page when the pager text is missing.
        found = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        total_page = int(found[0]) if found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://edu.shandong.gov.cn'
        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99249'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_edushandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for edu.shandong.gov.cn; nothing to schedule."""
    return DealModel()


def policy_edushandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for edu.shandong.gov.cn article pages.

    Extracts title/metadata/full text from the fetched HTML, builds the
    policy_latest / policy_fulltext_latest rows, and writes attachment
    info back onto the source task row.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the province: "省X厅" -> "山东省X厅".
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99249'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EDUSHANDONG"
    zt_provider = "edushandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment metadata (or an empty dict) on the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省民政厅 (Shandong Provincial Department of Civil Affairs)
def policy_mztshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzt.shandong.gov.cn (XML <record> feed).

    Reads the total record count; on page 1 it fans out one list task per
    3-page window (25 records/page -> 75-record start/end window), then
    turns every <record> on the current page into a next-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        found = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(found[0]) if found else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # Clamp the window end to the real record count.
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://mzt.shandong.gov.cn'
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('td[2]/a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99250'
            article_json = {
                "url": url,
                "title": li.xpath('td[2]/a/text()').extract_first().strip(),
                # Date cell is rendered as "[YYYY-MM-DD]" -> strip the brackets.
                "pub_date": li.xpath('td[3]/text()').extract_first().replace('[', '').replace(']', '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzt.shandong.gov.cn (plain HTML pager list).

    Reads the total page count from the "1_1" HTML payload; on page 1 it
    fans out one list task per page, then turns every article link on the
    current page into a next-stage insert task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning; default to a
        # single page when the pager text is missing.
        found = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        total_page = int(found[0]) if found else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://mzt.shandong.gov.cn'
        for li in res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                # Skip non-article links (attachments, anchors, etc.).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99250'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('b/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.shandong.gov.cn; nothing to schedule."""
    return DealModel()


def policy_mztshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for mzt.shandong.gov.cn article pages (sub_db_id 99250).

    Parses title / index number / written date / issuing organ and the
    full text from the article HTML, builds rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables, and records
    attachment info into the task row's ``other_dicts``.

    Raises:
        Exception: when the full-text node cannot be located (so the
            task fails instead of saving an empty record).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//td[@class="t20red"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"制发日期:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"主办单位:")]/following::td[1]/text()').extract()).strip()
    # Province-level organs appear without the province name on the site.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//td[@class="t14Grey"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext node not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99250'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTSHANDONG"
    zt_provider = "mztshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省财政厅
def policy_cztshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Shandong Department of Finance (山东省财政厅) XML search feed.

    The response carries a ``<totalrecord>`` count (25 records per page).
    On page 1 the remaining list pages are fanned out in batches of three
    pages (a start/end record window per task); every call then enqueues
    the article links found in the ``<record>`` nodes as next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step 3: each task requests a window of up to 75 records.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://czt.shandong.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if '.htm' not in url:
                continue
            # rawid = the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99251'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for czt.shandong.gov.cn HTML list pages.

    Total pages come from the pager text ``共&nbsp;N&nbsp;页``. Page 1
    fans out one task per list page (reusing the original ``list_json``);
    every call harvests article links from the ``zfxxgk_zdgkc`` list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The paging payload is unchanged for this site.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://czt.shandong.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99251'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztshandonglist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for czt.shandong.gov.cn search-module list pages.

    Same paging scheme as :func:`policy_cztshandonglist1_callback`
    (pager text ``共&nbsp;N&nbsp;页``); article links are taken from the
    ``main-left`` list and resolved with ``urljoin``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The paging payload is unchanged for this site.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="main-left"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://czt.shandong.gov.cn/module/search/index.jsp'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99251'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for czt.shandong.gov.cn: no extra dispatch, just an empty DealModel."""
    return DealModel()


def policy_cztshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for czt.shandong.gov.cn article pages (sub_db_id 99251).

    Parses title / document number / index number / written date /
    issuing organ / legal status and the full text, builds rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables, and records
    attachment info into the task row's ``other_dicts``.

    Raises:
        Exception: when the full-text node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no / legal_status are extracted but not persisted yet (columns
    # are commented out below) — kept for when the schema mapping is enabled.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文件编号:")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # Province-level organs appear without the province name on the site.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="main-txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext node not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99251'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTSHANDONG"
    zt_provider = "cztshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省人力资源和社会保障厅
def policy_hrssshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for hrss.shandong.gov.cn HTML list pages (no paging fan-out).

    Handles two list layouts (``list_list`` and ``wei``); each entry's
    link, title and publication date are packed into ``article_json`` and
    enqueued as a next-stage article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list_list"]/ul/li|//div[@class="wei"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('p[@class="list_title_one"]/a/@href|a/@href').extract_first()
            base_url = 'http://hrss.shandong.gov.cn/channels/ch07931/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # Raw string avoids the invalid "\." escape warning.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99252'
            article_json["url"] = url
            article_json["title"] = li.xpath('p[@class="list_title_one"]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('p[@class="list_more_one"]/text()|a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssshandonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the hrss.shandong.gov.cn JSON search feed.

    The response carries ``totalCount`` (15 records per page). The
    page-0 task fans out the remaining pages (``page_info`` = record
    offset); every call enqueues the ``result`` entries as article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        max_count = html_json['totalCount']
        total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        # NOTE(review): this feed is 0-based (unlike the sibling list
        # callbacks which fan out on page 1) — confirmed by the seed task.
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page_info carries the record offset for the next request.
                dic = {"page_info": str(page * 15)}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        li_list = html_json['result']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['URL']
            base_url = 'http://hrss.shandong.gov.cn/channels/ch00342/'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # Raw string avoids the invalid "\." escape warning.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99252'
            article_json["url"] = url
            article_json["title"] = li['NAME']
            article_json["pub_date"] = li['PUBDATE']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for hrss.shandong.gov.cn: no extra dispatch, just an empty DealModel."""
    return DealModel()


def policy_hrssshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for hrss.shandong.gov.cn article pages (sub_db_id 99252).

    Parses title / document number / written date / legal status /
    issuing organ and the full text, builds rows for ``policy_latest``
    and ``policy_fulltext_latest``, and records attachment info into the
    task row's ``other_dicts``.

    Raises:
        Exception: when the publication date is missing or the full-text
            node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="show_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        raise Exception(f'pub_date missing: {provider_url}')
    pub_no = ''.join(res.xpath('//div[@id="tabhd"]//strong[contains(text(),"发文字号：")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@id="tabhd"]//strong[contains(text(),"成文日期：")]/parent::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@id="tabhd"]//strong[contains(text(),"有效性：")]/parent::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@id="tabhd"]//strong[contains(text(),"发文机关：")]/parent::td[1]/text()').extract()).strip()
    # Province-level organs appear without the province name on the site.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="show"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext node not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99252'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSSHANDONG"
    zt_provider = "hrssshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山东省农业农村厅
def policy_nyncshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for nync.shandong.gov.cn static list pages.

    The last ``index_N`` link in the pager gives the page count; every
    call fans out the pages after the current one (``page_info`` =
    ``index_N``). List entries come as alternating <span> pairs
    (link span, date span), so the loop walks them two at a time.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r"='index_(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[-1]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        sql_dict = deal_sql_dict(sql_dict)
        for page in range(page_index + 1, total_page):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            dic = {"page_info": f"index_{page}"}
            sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        li_list = res.xpath('//ul[@class="news-list"]/span')
        # Spans alternate: even index = link, odd index = publish date.
        for i in range(0, len(li_list), 2):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li_list[i].xpath('a/@href').extract_first()
            base_url = f'http://nync.shandong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99253'
            article_json["url"] = url
            # The planning channel wraps the title in a second <a>.
            if 'zwgk/ghjh' in list_rawid:
                article_json["title"] = li_list[i].xpath('a[2]/text()').extract_first().replace('·', '').strip()
            else:
                article_json["title"] = li_list[i].xpath('a/text()').extract_first().replace('·', '').strip()
            article_json["pub_date"] = li_list[i + 1].xpath('text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article stage for nync.shandong.gov.cn: no extra dispatch, just an empty DealModel."""
    return DealModel()


def policy_nyncshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for nync.shandong.gov.cn article pages (sub_db_id 99253).

    Parses title / index number / subject / issuing organ and the full
    text, builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info into the
    task row's ``other_dicts``.

    Raises:
        Exception: when the full-text node cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="news-cont"]//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    index_no = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"主题分类")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"发布机构：")]/following::span[1]//text()').extract()).strip()
    # Province-level organs appear without the province name on the site.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="zwnr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext node not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99253'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCSHANDONG"
    zt_provider = "nyncshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省住房和城乡建设厅
def policy_zjtshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for zjt.shandong.gov.cn HTML list pages.

    Total pages come from the pager text ``共&nbsp;N&nbsp;页``. Page 1
    fans out one task per list page (reusing the original ``list_json``);
    every call harvests article links from the ``zfxxgk_zdgkc`` list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning.
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The paging payload is unchanged for this site.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://zjt.shandong.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99254'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/b/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zjt.shandong.gov.cn; no follow-up tasks are
    scheduled here, so an empty DealModel is returned."""
    return DealModel()


def policy_zjtshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for zjt.shandong.gov.cn policy articles.

    Parses the fetched article HTML, extracts metadata (title, index number,
    issuing organ) and the full text, and builds rows for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables.  Also writes any
    attachment info back onto the source row via ``befor_dicts.update_list``.

    :param callmodel: crawl context carrying the fetched HTML and the source row.
    :return: EtlDealModel with ``save_data`` rows and the attachment update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Metadata captured at list-crawl time; used as fallback values below.
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title on the article page itself; fall back to the list title.
    title = ''.join(res.xpath('//div[@class="wz_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_no = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"文  号:")]/parent::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xxgk_box"]//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    # subject = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"分  类:")]/parent::td[1]/text()').extract()).strip()
    # subject_word = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"主题词:")]/parent::td[1]/text()').extract()).strip()
    # written_date = ''.join(res.xpath('//td[contains(text(),"制发日期:")]/following::td[1]/text()').extract()).strip()
    # impl_date = ''.join(res.xpath('//li[contains(text(),"实施日期")]/span/text()').extract()).strip()
    # invalid_date = ''.join(res.xpath('//li[contains(text(),"废止日期")]/span/text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxgk_box"]//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(
    #         res.xpath('//div[@class="Article_ly"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    # Organs published as "省..." are prefixed with the province name.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="wz_wordbox zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # A missing full-text container means the page layout changed (or the
        # fetch failed); fail loudly with enough context to debug.
        raise Exception(f"fulltext not found, xpath={fulltext_xpath}, url={provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99254'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTSHANDONG"
    zt_provider = "zjtshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    # data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files referenced inside the full text) on the row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山东省卫生健康委员会
def policy_wsjkwshandonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for wsjkw.shandong.gov.cn (Shandong Health Commission).

    On the first page (page_index == 0) it reads the pager to learn the page
    count and schedules one follow-up list task per remaining page.  It then
    extracts article links from the current list page — two layouts, chosen by
    ``list_rawid`` — and schedules one article task per link.

    :param callmodel: crawl context carrying the fetched HTML and the source row.
    :return: DealModel with scheduled list pages (befor_dicts) and article
             tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # The second-to-last pager link holds the page count; a lone '>'
            # means there is no real pager, so default to a single page.
            max_count = res.xpath('//div[@class="pager"]/a[last()-1]/text()').extract_first()
            max_count = int(max_count) if max_count and max_count!='>' else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused below
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if 'zwgk/fdzdgknr/gfxwj' == list_rawid:
            # Table layout: first <tr> is the header, skip it.
            li_list = res.xpath('//table[@class="main_table striped_table"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                # NOTE(review): double slash after the host ('...gov.cn//...')
                # — urljoin preserves it; confirm this is intentional.
                base_url = f'http://wsjkw.shandong.gov.cn//{callmodel.sql_model.list_rawid}/index.html'
                # base_url = f'http://www.bjchp.gov.cn'
                url = parse.urljoin(base_url, href)
                # url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid = file name without extension, e.g. '.../t123.html' -> 't123'.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99255'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # News-list layout: link span and date span alternate, so step by 2.
            li_list = res.xpath('//ul[@class="news-list"]/span')
            for i in range(0, len(li_list), 2):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li_list[i].xpath('a/@href').extract_first()
                base_url = f'http://wsjkw.shandong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                # base_url = f'http://www.bjchp.gov.cn'
                url = parse.urljoin(base_url, href)
                # url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-2]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99255'
                article_json["url"] = url
                article_json["title"] = li_list[i].xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li_list[i + 1].xpath('text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwshandongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.shandong.gov.cn; nothing extra to
    schedule, so an empty DealModel is returned."""
    return DealModel()


def policy_wsjkwshandongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for wsjkw.shandong.gov.cn policy articles.

    Parses the fetched article HTML, extracts metadata (title, document
    number, index number, subject, written date, legal status, issuing organ)
    and the full text, and builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info is written back onto the
    source row via ``befor_dicts.update_list``.

    :param callmodel: crawl context carrying the fetched HTML and the source row.
    :return: EtlDealModel with ``save_data`` rows and the attachment update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Metadata captured at list-crawl time; used as fallback values below.
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list title.
    title = ''.join(res.xpath('//div[@class="news-cont"]//h2/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"文号：")]/following::span[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"索引号")]/following::span[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"主题分类")]/following::span[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"成文时间：")]/following::span[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"有效性：")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"发布机构：")]/following::span[1]//text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(res.xpath('//p[@class="fl"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    # Organs published as "省..." are prefixed with the province name.
    if organ.startswith('省'):
        organ = '山东' + organ

    fulltext_xpath = '//div[@class="zwnr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing container means the page layout changed (or fetch failed);
        # fail loudly with enough context to debug.
        raise Exception(f"fulltext not found, xpath={fulltext_xpath}, url={provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99255'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWSHANDONG"
    zt_provider = "wsjkwshandonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files referenced inside the full text) on the row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省济南市
def policy_jinanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for www.jinan.gov.cn (XML-style <record> listing).

    Reads the total record count from the response, schedules follow-up list
    tasks in strides of 3 pages (25 records per page, so each task covers a
    75-record window), then extracts article links from the current response.
    Channel '27544' uses a slightly different record markup.

    :param callmodel: crawl context carrying the fetched payload and source row.
    :return: DealModel with scheduled list pages (befor_dicts) and article
             tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Stride of 3: each scheduled task requests a 75-record window
            # ([start, end]) clamped to max_count on the last window.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        list_rawid = callmodel.sql_model.list_rawid
        if '27544' == list_rawid:
            # Channel 27544: link is the second <a> inside each <record>.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a[2]/@href').extract_first()
                base_url = f'http://www.jinan.gov.cn'
                # url = parse.urljoin(base_url, href)
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                if 'htm' not in url:
                    continue
                # rawid = file name without extension, e.g. '.../t123.html' -> 't123'.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99256'
                article_json["url"] = url
                article_json["title"] = li.xpath('a[2]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Default channels: link is the first <a> inside each <record>.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.jinan.gov.cn'
                # url = parse.urljoin(base_url, href)
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99256'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for www.jinan.gov.cn (searchform table layout).

    Reads the page count from the "共 N 页" pager text, schedules one follow-up
    list task per page (reusing the original list_json), then extracts article
    links from the result table following the search form.

    :param callmodel: crawl context carrying the fetched HTML and source row.
    :return: DealModel with scheduled list pages (befor_dicts) and article
             tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused below
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//form[@id="searchform"]/following::table[1]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href').extract_first()
            base_url = f'http://www.jinan.gov.cn'
            # url = parse.urljoin(base_url, href)
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension, e.g. '.../t123.html' -> 't123'.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99256'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[2]/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for www.jinan.gov.cn (zfxxgk_zdgkc <ul> layout).

    Reads the page count from the "共 N 页" pager text, schedules one follow-up
    list task per page (reusing the original list_json), then extracts article
    links from the ``zfxxgk_zdgkc`` list items.

    :param callmodel: crawl context carrying the fetched HTML and source row.
    :return: DealModel with scheduled list pages (befor_dicts) and article
             tasks (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused below
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"start": start, "end": end, "page_info": list_json["page_info"]}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.jinan.gov.cn'
            # url = parse.urljoin(base_url, href)
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension, e.g. '.../t123.html' -> 't123'.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99256'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jinanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.jinan.gov.cn; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_jinanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL step for www.jinan.gov.cn policy articles.

    Parses the fetched article HTML, extracts metadata (title, document
    number, index number, written date, legal status, issuing organ) and the
    full text, and builds rows for ``policy_latest`` and
    ``policy_fulltext_latest``.  Attachment info is written back onto the
    source row via ``befor_dicts.update_list``.

    :param callmodel: crawl context carrying the fetched HTML and the source row.
    :return: EtlDealModel with ``save_data`` rows and the attachment update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Metadata captured at list-crawl time; used as fallback values below.
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title (several page layouts); fall back to list title.
    title = ''.join(res.xpath('//div[@class="sp_time"]/preceding::div[1]//text()|//p[@class="title"]//text()|//div[@class="nyzw_page_top"]//h3/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//td[contains(text(),"文件编号:")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    # subject = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"分  类:")]/parent::td[1]/text()').extract()).strip()
    # subject_word = ''.join(res.xpath('//div[@class="xxgktypeinfo"]//strong[contains(text(),"主题词:")]/parent::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    # impl_date = ''.join(res.xpath('//li[contains(text(),"实施日期")]/span/text()').extract()).strip()
    # invalid_date = ''.join(res.xpath('//li[contains(text(),"废止日期")]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(
    #         res.xpath('//div[@class="Article_ly"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    # Organs published as "市..." are prefixed with the city name.
    if organ.startswith('市'):
        organ = '济南' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Missing container means the page layout changed (or fetch failed);
        # fail loudly with enough context to debug.
        raise Exception(f"fulltext not found, xpath={fulltext_xpath}, url={provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99256'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINAN"
    zt_provider = "jinangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Store attachment info (files referenced inside the full text) on the row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省青岛市
def policy_qingdaolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        list_rawid = callmodel.sql_model.list_rawid
        if 'ywdt/gggs/' == list_rawid or 'qdsgfxwjjs/szfbm_qdsgfxwjjs/' == list_rawid:
            page_index = int(callmodel.sql_model.page_index)
            if page_index == 0:
                max_count = re.findall('trsItemCount = (\d+)', para_dicts["data"]["1_1"]['html'])
                max_count = int(max_count[0]) if max_count else 1
                total_page = max_count
                sql_dict = callmodel.sql_model.dict()
                di_model_bef = DealInsertModel()
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
                sql_dict = deal_sql_dict(sql_dict)
                list_json = json.loads(callmodel.sql_model.list_json)
                for page in range(1, total_page + 1):
                    sql_dict["page"] = total_page
                    sql_dict["page_index"] = page
                    num = page // 100
                    dic = {"url_part": list_json['url_part'], "page_info": f"{num}/{page}.json"}
                    sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                    di_model_bef.lists.append(sql_dict.copy())
                result.befor_dicts.insert.append(di_model_bef)
        else:
            max_count = re.findall('PAGE_COUNT="(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            page_index = int(callmodel.sql_model.page_index)
            if page_index == 0:
                sql_dict = callmodel.sql_model.dict()
                di_model_bef = DealInsertModel()
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
                sql_dict = deal_sql_dict(sql_dict)
                list_json = json.loads(callmodel.sql_model.list_json)
                for page in range(1, total_page):
                    sql_dict["page"] = total_page
                    sql_dict["page_index"] = page
                    dic = {"url_part": list_json['url_part'], "page_info": f"index_{page}.shtml"}
                    sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                    di_model_bef.lists.append(sql_dict.copy())
                result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'ywdt/gggs' == list_rawid or 'qdsgfxwjjs/szfbm_qdsgfxwjj' == list_rawid:
            pass
        elif '.json' in callmodel.sql_model.list_json:
            res = json.loads(para_dicts["data"]["1_1"]['html'])
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = res['FILEURL']
            # base_url = f'http://www.nantong.gov.cn'
            base_url = f'http://www.qingdao.gov.cn/{callmodel.sql_model.list_rawid}index.html'
            url = parse.urljoin(base_url, href)
            # url = base_url + href
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99257'
            article_json["url"] = url
            article_json["title"] = res['TITLE']
            article_json["pub_date"] = res['DOCRELTIME']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        else:
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="list_box"]/ul/li|//div[@class="right_list_news"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if 'fbh.qtv' in href:
                    continue
                base_url = f'http://www.qingdao.gov.cn/{callmodel.sql_model.list_rawid}index.html'
                # base_url = f'http://www.nantong.gov.cn'
                url = parse.urljoin(base_url, href)
                # url = base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99257'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('//span[contains(text(),"发文日期：")]/text()|//span[@class="dateTime"]/text()|//span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qingdaolist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Qingdao policy documents served as a JSON feed.

    Parses the JSON list response; on the first page it fans out one task
    row per page of the result set, and on every page it queues one
    article-level task per list entry (sub_db_id 99257).

    Args:
        callmodel: framework callback context holding the fetched page and
            the originating SQL row.

    Returns:
        DealModel with ``befor_dicts`` (page fan-out) and ``next_dicts``
        (article tasks) populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        max_count = html_json['data']['total']
        total_page = math.ceil(max_count / 10)  # feed serves 10 records per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page generates the rows for all other pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        for li in html_json['data']['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li['url']
            base_url = 'http://www.qingdao.gov.cn/public/zcwjflck/index.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99257'
            article_json = {"url": url,
                            "title": li['title'],
                            "pub_date": li['publishdate']}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qingdaoarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Qingdao; parsing is deferred to the ETL step."""
    return DealModel()


def policy_qingdaoarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qingdao policy article pages (sub_db_id 99257).

    Extracts title/date/document-number metadata and the full text from the
    article HTML, queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and writes attachment info back to the
    source row's ``other_dicts``.

    Raises:
        Exception: when no usable pub_date or no full text can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[contains(@class, "title")]//text()|//div[@class="biaoti"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # The list-page date may be missing or a "0000..." placeholder; try two
    # page-level locations before giving up.
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布日期")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"发布日期")]/following::span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_no = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"编号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"生效日期")]/following::span[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"失效日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"info_box")]//span[contains(text(),"制发机关")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix bare municipal organ names with the city for disambiguation.
        organ = '青岛' + organ

    fulltext_xpath = '//div[@id="js_content"]|//div[@class="article_txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99257'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QINGDAO"
    zt_provider = "qingdaogovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) on the source row for later download.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省淄博市
def policy_zibolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zibo policy documents (sub_db_id 99258).

    Reads the record total from an inline JS ``parseInt`` snippet, fans out
    one task row per list page when on page 1, and queues one article task
    per entry.  Two list layouts are supported, selected by ``list_rawid``.

    Returns:
        DealModel with ``befor_dicts`` (page fan-out) and ``next_dicts``
        (article tasks) populated.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html']
        max_count = re.findall(r'fdzdgknr_total = parseInt\("(\d+)', html)
        if not max_count:
            # Alternate variable name used on some channels.
            max_count = re.findall(r'total = parseInt\("(\d+)', html)
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 50)  # 50 records per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page generates the rows for all other pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}.shtml"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=html)
        if 'gongkai/channel_c1535/?per_page=50&page=' in callmodel.sql_model.list_rawid:
            # Layout 1: "zcwj" list; the first <li> is a header row, skip it.
            li_list = res.xpath('//div[@class="zcwj-list zcwj-list0"]/ul/li')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('p[@class="title"]/a/@href').extract_first()
                base_url = f'http://www.zibo.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the detail-page file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99258'
                article_json = {
                    "url": url,
                    "title": li.xpath('p[@class="title"]/a/text()').extract_first().strip(),
                    "pub_date": li.xpath('p[@class="time"]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 2: plain <ul> with title/time spans inside each <a>.
            li_list = res.xpath('//ul[contains(@class,"fdzdgknr-right-tab-list")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.zibo.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99258'
                article_json = {
                    "url": url,
                    "title": li.xpath('a/span[@class="title"]/text()').extract_first().strip(),
                    "pub_date": li.xpath('a/span[@class="time"]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zibolist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zibo policy documents served as XML records.

    Reads the record total from ``<totalrecord>``, fans out tasks covering
    the result set in windows of three 25-record pages, and queues one
    article task per ``<record>`` element (sub_db_id 99258).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)  # 25 records per page

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page generates the rows for all other windows.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Step 3: each task fetches a record window spanning three pages.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        for li in res.xpath('//record'):
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.zibo.gov.cn'
            # Records may carry absolute or site-relative links.
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the detail-page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99258'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ziboarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Zibo; parsing is deferred to the ETL step."""
    return DealModel()


def policy_ziboarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zibo policy article pages (sub_db_id 99258).

    Extracts metadata and the full text from the article HTML, queues rows
    for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back to the source row's ``other_dicts``.

    Raises:
        Exception: when no full text can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Fix: the original used the absolute path '/h1[...]', which can never
    # match below the document root once parsel wraps the fragment in
    # <html>/<body>; '//h1[...]' is the intended selector.
    title = ''.join(res.xpath('//h1[@class="details-title"]//text()|//table[@id="c"]//td[@class="title"]//text()').extract()).strip().replace('<br/>', '')
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip().replace('<br/>', '')
    pub_no = ''.join(res.xpath('//table[@class="details-info"]//td[contains(text(),"文号：")]/following::td[1]/text()').extract()).strip()
    if pub_no == '无文号':
        # "无文号" means "no document number"; normalise to empty.
        pub_no = ''
    index_no = ''.join(res.xpath('//table[@class="details-info"]//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="details-info"]//td[contains(text(),"有效性：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="details-info"]//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix bare municipal organ names with the city for disambiguation.
        organ = '淄博' + organ

    fulltext_xpath = '//div[@id="details-content"]|//div[@id="zoom"]|//div[@id="zhengwen"]|//div[@class="details-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99258'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZIBO"
    zt_provider = "zibogovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) on the source row for later download.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省枣庄市
def policy_zaozhuanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zaozhuang policy documents (sub_db_id 99259).

    On the first page (``page_index == 0``) the pager is parsed and one task
    row per remaining page is fanned out; every page then queues one article
    task per list entry.  Two list layouts are supported, selected by
    ``list_rawid``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = res.xpath('//div[@class="pager"]/a[last()-1]/text()').extract_first()
            # NOTE(review): only the LAST character of the pager text is
            # parsed, so a multi-digit page count would be truncated —
            # confirm against the live pager markup before changing.
            max_count = int(max_count[-1]) if max_count and '>' != max_count[-1] else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Page 0 is the current index.html; fan out index_1..index_{n-1}.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        list_rawid = callmodel.sql_model.list_rawid
        if 'zwgk/zt/gzk' == list_rawid:
            # Regulation-repository layout: one <tr> per document, no date column.
            li_list = res.xpath('//table[@class="mess_table"]/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('td[2]/span[@class="gzbt"]/a/@href').extract_first()
                base_url = f'http://www.zaozhuang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the detail-page file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99259'
                article_json = {
                    "url": url,
                    # Strip the leading "·" bullet from the title.
                    "title": li.xpath('td[2]/span[@class="gzbt"]/a/text()').extract_first().replace('·', '', 1).strip(),
                    "pub_date": '',
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Default layout: <span> pairs — a title span followed by a date span.
            li_list = res.xpath('//ul[@class="news-list"]/span')
            for i in range(0, len(li_list), 2):
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li_list[i].xpath('a/@href').extract_first()
                base_url = f'http://www.zaozhuang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99259'
                article_json = {
                    "url": url,
                    "title": li_list[i].xpath('a/text()').extract_first().replace('·', '', 1).strip(),
                    "pub_date": li_list[i + 1].xpath('text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zaozhuanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zaozhuang policy lists paged by a JS counter.

    Reads the record total from the inline ``m_nRecordCount`` variable, fans
    out one task row per page when on page 1, and queues one article task
    per list entry (sub_db_id 99259).  Two list layouts are supported,
    selected by ``list_rawid``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'm_nRecordCount = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)  # 15 records per page

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page generates the rows for all other pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Every page reuses the original list_json payload unchanged.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        if list_rawid in ('8', '9', '51'):
            # Tabular layout; skip the header row.
            li_list = res.xpath('//table[@class="table_newest"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = 'http://www.zaozhuang.gov.cn'
                # Rows may carry absolute or site-relative links.
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                # rawid is the detail-page file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99259'
                article_json = {
                    "url": url,
                    "title": li.xpath('td[2]/a/text()').extract_first().strip(),
                    "pub_date": li.xpath('td[4]/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Default layout: plain <ul> list with the date in a <b> element.
            li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = li.xpath('a/@href').extract_first()
                base_url = 'http://www.zaozhuang.gov.cn'
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99259'
                article_json = {
                    "url": url,
                    "title": li.xpath('a/text()').extract_first().strip(),
                    "pub_date": li.xpath('b/text()').extract_first().strip(),
                }
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zaozhuangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Zaozhuang; parsing is deferred to the ETL step."""
    return DealModel()


def policy_zaozhuangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zaozhuang policy article pages (sub_db_id 99259).

    Extracts metadata and the full text from the article HTML, queues rows
    for ``policy_latest`` and ``policy_fulltext_latest``, and writes
    attachment info back to the source row's ``other_dicts``.

    Raises:
        Exception: when no full text can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="news-cont"]//h2//text()|//div[@class="level3_title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata labels are split character-by-character across the markup, so
    # each field is matched by two characters of its label.
    pub_no = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"文") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"索") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"主") and contains(text(),"类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"成") and contains(text(),"间")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"效") and contains(text(),"态")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="news-info"]//span[contains(text(),"发") and contains(text(),"构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix bare municipal organ names with the city for disambiguation.
        organ = '枣庄' + organ

    fulltext_xpath = '//div[@class="zwnr"]|//div[@class="gznr"]|//div[@class="level3_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99259'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZAOZHUANG"
    zt_provider = "zaozhuanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) on the source row for later download.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Dongying, Shandong Province
def policy_dongyinglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dongying policy documents (XML record feed).

    On the first page, fans out the remaining list pages into before-tasks
    (stepping 3 pages / 75 records per generated task); every ``<record>``
    node then becomes an article next-task carrying url/title/pub_date.

    :param callmodel: platform callback context; the fetched list HTML sits
        in ``para_dicts["data"]["1_1"]["html"]``.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count from the XML envelope; default to 1 so at least
        # one page is still processed when the tag is missing.
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)  # 25 records per page

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 enqueues the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # Clamp the record window to the actual total.
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|a/@href').extract_first()
            base_url = 'http://www.dongying.gov.cn'
            # Absolute links are kept as-is, relative ones get the site root.
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue  # skip non-article links (e.g. attachments)
            # rawid = file name of the article page without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99260'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dongyinglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dongying static "zfxxgk_zdgkc" list pages.

    On page 1 fans out one before-task per list page (the request payload is
    identical for every page, only ``page_index`` differs); each ``<li>``
    entry becomes an article next-task carrying url/title/pub_date.

    :param callmodel: platform callback context with the fetched list HTML.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager prints the page count as "共&nbsp;N&nbsp;页".
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Same payload for every page; pagination is driven by page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.dongying.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue  # skip non-article links
            # rawid = page file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99260'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dongyingarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Dongying; returns an empty deal."""
    return DealModel()


def policy_dongyingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dongying policy article pages.

    Scrapes title, document/index numbers, subject, dates, legal status and
    issuing organ from the article HTML, extracts the full text, and queues
    rows for ``policy_latest`` and ``policy_fulltext_latest``.  Attachment
    info found inside the full text is written back through an update model.

    :param callmodel: callback context; article HTML in
        ``para_dicts['data']['1_1']['html']``, list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: if no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table layout: label cell followed by its value cell.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文件编号:")]/following::td[1]/text()').extract()).strip()
    if not pub_no:
        pub_no = ''.join(res.xpath('//td[contains(text(),"发文字号:")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//td[contains(text(),"主题:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//td[contains(text(),"失效日期:")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal-level organ names with the city name.
        organ = '东营' + organ

    fulltext_xpath = '//td[@class="main-txt"]|//div[@class="con-main clearfix"]|//div[@class="nrCon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged for retry/inspection.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99260'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DONGYING"
    zt_provider = "dongyinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yantai, Shandong Province
def policy_yantailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yantai policy documents (XML record feed).

    On page 1, fans out the remaining list pages into before-tasks (3 pages /
    75 records per generated task); every ``<record>`` node becomes an
    article next-task carrying url/title/pub_date.

    :param callmodel: platform callback context with the fetched list HTML.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total record count; default to 1 so one page is still processed.
        max_count = re.findall(r'<totalrecord>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)  # 25 records per page

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 enqueues the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                # Clamp the record window to the actual total.
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.yantai.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue  # skip non-article links
            # rawid = page file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99261'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yantailist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yantai static "zfxxgk_zdgkc" list pages.

    On page 1 fans out one before-task per list page (identical payload,
    only ``page_index`` varies); each ``<li>`` entry becomes an article
    next-task carrying url/title/pub_date.

    :param callmodel: platform callback context with the fetched list HTML.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The pager prints the page count as "共&nbsp;N&nbsp;页".
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Same payload for every page; pagination driven by page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.yantai.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue  # skip non-article links
            # rawid = page file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99261'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yantailist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yantai col51213 column.

    The pager exposes no usable total here, so the page count is hard-coded
    to 4.  On page 1 fans out one before-task per list page; each ``<li>``
    under ``ul.wzy`` becomes an article next-task.

    :param callmodel: platform callback context with the fetched list HTML.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        total_page = 4  # fixed: the column only exposes 4 list pages
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Same payload for every page; pagination driven by page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="wzy"]//li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href').extract_first()
            base_url = 'http://www.yantai.gov.cn/col/col51213/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue  # skip non-article links
            # rawid = page file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99261'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[4]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yantaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Yantai; returns an empty deal."""
    return DealModel()


def policy_yantaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yantai policy article pages.

    Scrapes title, index number, written date and issuing organ from the
    article HTML, extracts the full text (``div#zoom``), and queues rows for
    ``policy_latest`` and ``policy_fulltext_latest``.  Attachment info found
    inside the full text is written back through an update model.

    :param callmodel: callback context; article HTML in
        ``para_dicts['data']['1_1']['html']``, list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: if no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//div[@class="title"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table layout: label cell followed by its value cell.
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal-level organ names with the city name.
        organ = '烟台' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged for retry/inspection.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99261'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YANTAI"
    zt_provider = "yantaigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Weifang, Shandong Province
def policy_weifanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Weifang policy documents (JSON API).

    The response is JSON; depending on the endpoint variant, the page count
    and record list live directly under ``data`` or nested under
    ``data.page``.  Page 1 fans out one before-task per page; each record
    becomes an article next-task whose URL is built from dwid/xxid.

    :param callmodel: platform callback context with the fetched JSON body.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # totalPages may sit at data.totalPages or data.page.totalPages.
            total_page = html_json['data'].get('totalPages', "")
            if isinstance(total_page, str):
                total_page = html_json['data']['page'].get('totalPages', "")
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Same payload for every page; pagination driven by page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        # Record list mirrors the totalPages layout (flat or nested in .page).
        li_list = html_json['data'].get('contents', "")
        if isinstance(li_list, str):
            li_list = html_json['data']['page'].get('contents', "")
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            rawid = li['xxid']
            dwid = li['dwid']
            # Article URL is reconstructed from the record's unit/article ids.
            url = f'http://www.weifang.gov.cn/162/{dwid}/{rawid}.html'
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99262'
            article_json["url"] = url
            article_json["title"] = li['subject']
            article_json["pub_date"] = li['fwdate']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_weifanglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Weifang municipal-document column.

    The column is a single page, so the page count is hard-coded to 1.
    Each ``<li>`` under ``div.nbox`` becomes an article next-task.

    :param callmodel: platform callback context with the fetched list HTML.
    :return: DealModel holding pagination before-tasks and article next-tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        total_page = 1  # fixed: this column has a single list page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Same payload for every page; pagination driven by page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="nbox"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.weifang.gov.cn/zcwj/swwj/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue  # skip non-article links
            # rawid = page file name without its extension
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99262'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_weifangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Weifang; returns an empty deal."""
    return DealModel()


def policy_weifangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Weifang policy article pages.

    Scrapes title, document/index numbers, subject, written date, legal
    status and issuing organ from the article HTML (detail-table layout with
    split label characters), extracts the full text, and queues rows for
    ``policy_latest`` and ``policy_fulltext_latest``.  Attachment info found
    inside the full text is written back through an update model.

    :param callmodel: callback context; article HTML in
        ``para_dicts['data']['1_1']['html']``, list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: if no full-text container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list stage.
    title = ''.join(res.xpath('//p[@class="htitle"]//text()|//div[@class="art-title"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Labels contain full-width padding between characters, so match on
    # first+last character of each label instead of the full string.
    pub_no = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"分") and contains(text(),"类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"成") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"效") and contains(text(),"态")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="detail-table"]//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify municipal-level organ names with the city name.
        organ = '潍坊' + organ

    fulltext_xpath = '//div[@id="ozoom"]|//div[@id="mainText"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is flagged for retry/inspection.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99262'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WEIFANG"
    zt_provider = "weifanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date was cleaned above; re-cleaning kept as-is pending confirmation
    # that clean_pubdate is idempotent.
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省济宁市
def policy_jininglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong Jining gov policy (XML ``<record>`` feed).

    On the first page, derives the page count from ``<totalrecord>`` (25
    records per page) and fans out one list task per 3-page window; for every
    ``<record>`` in the current response it queues one next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each generated task covers a 3-page window of records [start, end].
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.jining.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = the page's file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99263'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jininglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Alternate list-page callback for Jining (HTML list, one task per page).

    On the first page, reads the total page count from the "共&nbsp;N&nbsp;页"
    pager text and fans out one list task per page; each ``li`` under
    ``div.zfxxgk_zdgkc`` becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is reused unchanged for every page task.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.jining.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # Keep only on-site .htm article links.
            if 'htm' not in url or 'jining' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99263'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiningarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jining: nothing to do, return an empty DealModel."""
    return DealModel()


def policy_jiningarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Jining gov policy article pages.

    Extracts metadata (title, index no, written date, legal status, organ)
    from the ``xxgk_table`` layout plus the full text from ``div#zoom``, then
    queues rows for policy_latest / policy_fulltext_latest and writes the
    attachment info back onto the originating task row.

    :param callmodel: callback context with the fetched article page
    :return: EtlDealModel with save_data and a befor_dicts update
    :raises Exception: when no full text can be located on the page
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallback chain: page heading -> ArticleTitle meta -> list-page title.
    title = ''.join(res.xpath('//p[@class="main-title"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Prefix the city name when the organ is the bare "市..." form.
    if organ.startswith('市'):
        organ = '济宁' + organ

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99263'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JINING"
    zt_provider = "jininggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省泰安市
def policy_taianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong Taian gov policy (XML ``<record>`` feed).

    On the first page, derives the page count from ``<totalrecord>`` (25
    records per page) and fans out one list task per 3-page window; every
    ``<record>`` becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'<totalrecord>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each generated task covers a 3-page window of records [start, end].
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.taian.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99264'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Alternate list-page callback for Taian (HTML list, one task per page).

    On the first page, reads the page count from the "共&nbsp;N&nbsp;页" pager
    text and fans out one list task per page; each ``li`` under
    ``div.zfxxgk_zdgkc`` becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is reused unchanged for every page task.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.taian.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99264'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span/b/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taianlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Second alternate list-page callback for Taian (div-grid layout).

    On the first page, reads the page count from the "共&nbsp;N&nbsp;页" pager
    text and fans out one list task per page; each item div (skipping the
    header row) becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is reused unchanged for every page task.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//body/div/div/div')[1:]  # [0] is the header row
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[1]/a/@href').extract_first()
            base_url = 'http://www.taian.gov.cn/col/col249725/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            # NOTE(review): sibling Taian callbacks use sub_db_id '99264' and the
            # Taian ETL builds lngid from '99264' — confirm '99261' is intentional.
            temp["sub_db_id"] = '99261'
            article_json["url"] = url
            article_json["title"] = li.xpath('div[1]/a/span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[4]/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_taianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Taian: nothing to do, return an empty DealModel."""
    return DealModel()


def policy_taianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Taian gov policy article pages.

    Extracts metadata (title, pub no, index no, written date, validity period,
    legal status, organ) from the ``xxgk_table`` layout plus the full text,
    then queues rows for policy_latest / policy_fulltext_latest and writes the
    attachment info back onto the originating task row.

    :param callmodel: callback context with the fetched article page
    :return: EtlDealModel with save_data and a befor_dicts update
    :raises Exception: when no full text can be located on the page
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallback chain: several page-heading layouts -> ArticleTitle meta
    # (also preferred when the heading spans multiple lines) -> list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//h1//text()|//div[@class="ewb-article-tt"]//text()'
                              '|//p[@class="con-title"]//text()|//div[@class="nr_right_tit"]//text()'
                              '|//div[@class="name"]//text()|//div[@class="main-fl-tit"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"有效期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk_table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Prefix the city name when the organ is the bare "市..." form.
    if organ.startswith('市'):
        organ = '泰安' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@class="ewb-article-detail"]|//meta[@name="ContentStart"]/ancestor::div[1]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99264'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "TAIAN"
    zt_provider = "taiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省威海市
def policy_weihailist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Shandong Weihai gov policy (XML ``<record>`` feed).

    On the first page, derives the page count from ``<totalrecord>`` (25
    records per page) and fans out one list task per 3-page window; every
    ``<record>`` becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Each generated task covers a 3-page window of records [start, end].
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.weihai.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99265'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_weihailist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Alternate list-page callback for Weihai (HTML list, one task per page).

    On the first page, reads the page count from the ";共N页&" pager text and
    fans out one list task per page; each ``li`` under ``div.zfxxgk_zdgkc``
    becomes a next-stage article task.

    :param callmodel: callback context with the fetched pages and sql row
    :return: DealModel carrying befor_dicts (paging) and next_dicts (articles)
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\d" escape warning (Python 3.12+).
        max_count = re.findall(r';共(\d+)页&', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The list_json payload is reused unchanged for every page task.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.weihai.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99265'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_weihaiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Weihai: nothing to do, return an empty DealModel."""
    return DealModel()


def policy_weihaiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Weihai gov policy article pages.

    Handles two metadata layouts: pages containing a ``table.hide_s`` info
    table, otherwise the ``div.inpublic`` table. Extracts metadata and the
    full text from ``div.art_con``, then queues rows for policy_latest /
    policy_fulltext_latest and writes the attachment info back onto the
    originating task row.

    :param callmodel: callback context with the fetched article page
    :return: EtlDealModel with save_data and a befor_dicts update
    :raises Exception: when no full text can be located on the page
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallback chain: page heading -> ArticleTitle meta -> list-page title.
    title = ''.join(res.xpath('//div[@class="contentA1"]//h1/text()|//div[@class="xxgkcont"]/h2/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'hide_s' in html:
        pub_no = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"文号:")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"内容分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="hide_s"]//td[contains(text(),"发布单位")]/following::td[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"内容分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="inpublic"]//td[contains(text(),"发布单位")]/following::td[1]/text()').extract()).strip()
    # Prefix the city name when the organ is the bare "市..." form.
    if organ.startswith('市'):
        organ = '威海' + organ

    fulltext_xpath = '//div[@class="art_con"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99265'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WEIHAI"
    zt_provider = "weihaigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or "{}") back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省日照市
def policy_rizhaolist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Rizhao city (Shandong) policy documents.

    On the first page, fans out tasks for the remaining list pages (each
    scheduled request covers a window of three 25-record pages); for every
    ``<record>`` entry, emits one article-level task carrying
    url/title/pub_date in ``article_json``.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: avoids the invalid-escape-sequence warning on \d.
        max_count = re.findall(r'<totalrecord>(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step of 3: each request asks the API for a 3-page record window
            # (start..end are 1-based record indices, 25 records per page).
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = (page + 2) * 25
                if end >= max_count:
                    end = max_count
                dic = {"start": start, "end": end, "columnid": list_json['columnid'], "unitid": list_json['unitid']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            base_url = 'http://www.rizhao.gov.cn'  # plain string: no placeholders needed
            list_rawid = callmodel.sql_model.list_rawid
            # A few columns use different list markup; pick the matching xpaths.
            if list_rawid in ('207870_502025', '215287_473537', '215288_473537'):
                href = li.xpath('a/@href').extract_first()
                title = li.xpath('a/@title').extract_first().strip()
                date = li.xpath('span/text()').extract_first().replace('[', '').replace(']', '').strip()
            elif '239154_517235' == list_rawid:
                href = li.xpath('div/p[@class="fgk_biaoti"]/a[1]/@href').extract_first()
                title = li.xpath('div/p[@class="fgk_biaoti"]/a[1]/text()').extract_first().strip()
                date = ''
            else:
                href = li.xpath('td[2]/a/@href').extract_first()
                title = li.xpath('td[2]/a/text()').extract_first().strip()
                date = li.xpath('td[3]/text()').extract_first().strip()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = filename without extension, e.g. ".../abc123.html" -> "abc123"
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99266'
            article_json["url"] = url
            article_json["title"] = title
            article_json["pub_date"] = date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rizhaolist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Rizhao's paginated HTML listings (zfxxgk_zdgkc).

    The total page count is read from the pager text; the first page schedules
    every page reusing the unchanged list_json payload (pagination is carried
    in ``page_index``). Each ``<li>`` becomes one article-level task.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: avoids the invalid-escape-sequence warning on \d.
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is passed through unchanged for every page.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = 'http://www.rizhao.gov.cn'  # plain string: no placeholders needed
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = filename without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99266'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result

def policy_rizhaolist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Rizhao's "zc_listul" column (col287338).

    Pager text gives the page total; the first page schedules the rest with
    the unchanged list_json payload. Relative hrefs are resolved against the
    column index URL via urljoin.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Raw string: avoids the invalid-escape-sequence warning on \d.
            max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is passed through unchanged for every page.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="zc_listul"]//li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('p[@class="list_title"]/a/@href').extract_first()
            base_url = 'http://www.rizhao.gov.cn/col/col287338/index.html'  # plain string: no placeholders
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = filename without extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99266'
            article_json["url"] = url
            article_json["title"] = ''.join(li.xpath('p[@class="list_title"]/a//text()').extract()).strip()
            article_json["pub_date"] = li.xpath('p[@class="list_con"]/span[3]/text()').extract_first().replace('发布日期：', '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rizhaoarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Rizhao; parsing is deferred to the ETL step."""
    return DealModel()


def policy_rizhaoarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Rizhao policy article pages.

    Extracts metadata (title, dates, issuing organ, document/index numbers,
    legal status) from the article HTML — two metadata layouts exist, keyed on
    the presence of ``class="xxgk"`` — then builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows and records attachment info into the
    source row's ``other_dicts``.

    :param callmodel: platform callback context (fetched article + SQL row).
    :return: EtlDealModel carrying save_data rows and the other_dicts update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Fall back to the on-page publish date when the list page gave none/partial.
    if len(pub_date) < 8:
        pub_date_info = ''.join(res.xpath('//li[@class="fbrq"]/text()').extract()).strip()
        pub_date = pub_date_info.replace('发布日期：', '').strip()
        pub_year = pub_date[:4]
    # Title preference: on-page heading -> <meta ArticleTitle> -> list-page title.
    title = ''.join(res.xpath('//div[@class="article_title"]//text()|//span[@class="biaoti"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'class="xxgk"' in html:
        # Newer layout: th/td metadata table inside div.xxgk.
        pub_no = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"效") and contains(text(),"性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="xxgk"]//th[contains(text(),"发布机关")]/following::td[1]/text()').extract()).strip()
    else:
        # Older layout: plain td/td metadata table.
        pub_no = ''.join(res.xpath('//table//td[contains(text(),"文号:")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table//td[contains(text(),"索引号:")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table//td[contains(text(),"成文日期:")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table//td[contains(text(),"效力状态:")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table//td[contains(text(),"发布机构:")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '日照' + organ

    fulltext_xpath = '//div[@class="article_text"]|//div[@class="wz_zhuti"]|//div[contains(@class,"article_text")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99266'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RIZHAO"
    zt_provider = "rizhaogovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the full-text container go into other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省滨州市
def policy_binzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Binzhou city (Shandong) policy documents.

    The listing endpoint returns JSON; the first page fans out the remaining
    pages (20 records each, list_json unchanged), and each record becomes one
    article task whose URL is built from the record id.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        tcount = html_json['result']['total']
        total_page = math.ceil(tcount / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is passed through unchanged for every page.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['result']['records']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            rawid = li['id']
            url = f'http://www.binzhou.gov.cn/zfxxgk/news/html/?{rawid}.html'
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99267'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['fbdate']
            # fwzh (document number) may be absent from a record.
            article_json["fwzh"] = li.get('fwzh', '')
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_binzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Binzhou; parsing is deferred to the ETL step."""
    return DealModel()


def policy_binzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Binzhou policy articles.

    The article endpoint returns JSON (not HTML): metadata is read from the
    JSON ``result`` object, and its ``body`` field is stored verbatim as the
    full text. Builds policy_latest / policy_fulltext_latest rows and records
    attachment info into the source row's other_dicts.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    html = html_json['result']['body']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title from the article JSON; fall back to the list-page title.
    title = html_json['result']['title']
    if not title:
        title = article_json['title'].strip()
    pub_no = article_json['fwzh']
    # index_no = html_json['result']['fbdate']
    # subject = ''.join(res.xpath('//div[@class="general-top"]/p[contains(text(),"主题分类")]/following::p[1]/text()').extract()).strip()
    # subject_word = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"主")and contains(text(),"词")]/following::td[1]/text()').extract()).strip()
    # NOTE(review): fbdate is used as written_date here while pub_date comes
    # from the list page — presumably both carry the same publish date; confirm.
    written_date = html_json['result']['fbdate']
    # impl_date = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"生效日期")]/following::td[1]//text()').extract()).strip()
    # invalid_date = ''.join(res.xpath('//table[@class="t1"]//td[contains(text(),"废止日期")]/following::td[1]//text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//div[@class="tab-page"]//td[contains(text(),"时")and contains(text(),"效")]/following::td[1]/text()').extract()).strip()
    organ = html_json['result']['fwjg']
    # if not organ:
    #     organ_info = ''.join(
    #         res.xpath('//div[@class="Article_ly"]/span[contains(text(),"来源：")]/text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1]
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '滨州' + organ

    # fulltext_xpath = '//div[@id="ivs_content"]'
    # The JSON body is the full text; no extraction needed.
    fulltext = html
    # if not fulltext:
    #     raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99267'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BINZHOU"
    zt_provider = "binzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect in-body attachment links plus the JSON-declared attachment URL.
    file_info1 = get_file_info(data, res, f'(//body)')
    file_info2 = list()
    purl = html_json['result']['attachments']
    if purl:
        url = parse.urljoin(provider_url, purl)
        dic = {'url': url, 'name': html_json['result']['hylx'], 'pub_year': pub_year, 'keyid': lngid}
        file_info2.append(dic)
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山东省德州市
def policy_dezhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dezhou city (Shandong) policy documents.

    Page count comes from the embedded ``maxPageNum`` script variable; the
    initial page (page_index == 0) fans out subsequent pages with a per-page
    page_info token. List items are then emitted as article tasks.

    NOTE(review): the fan-out loop is ``range(1, total_page)``, which skips
    the last page index — presumably intentional given the site's reversed
    ``total_page - page`` token, but worth confirming.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: avoids the invalid-escape-sequence warning on \d.
        max_count = re.findall(r'maxPageNum = (\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # The component id is needed to build each page's request token.
            page_id = re.findall('#comp_(.*?)"', para_dicts["data"]["1_1"]['html'])[0]
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page_id}_{total_page - page}", "url_part": f"{list_json['url_part']}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # First page embeds items inside the component span; later pages are bare lists.
        if page_index == 0:
            li_list = res.xpath('//span[contains(@id,"comp")]//ul/li')
        else:
            li_list = res.xpath('//ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            # One column lives on the fgw subdomain; the rest on the main site.
            if 'n54284698/n54284713/n54284753' == list_rawid:
                base_url = 'http://fgw.dezhou.gov.cn/n54284698/n54284713/n54284753/index.html'
            else:
                base_url = f'http://www.dezhou.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = last directory component of the article URL.
            rawid = url.split('/')[-2]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99268'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[@class="wgrey"]/text()|b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dezhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dezhou; parsing is deferred to the ETL step."""
    return DealModel()


def policy_dezhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dezhou policy article pages.

    Extracts metadata from the ``div.infos`` dt/dd table, builds the
    policy_latest / policy_fulltext_latest rows, and records attachment info
    into the source row's other_dicts.

    :param callmodel: platform callback context (fetched article + SQL row).
    :return: EtlDealModel carrying save_data rows and the other_dicts update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="warttitle"]//text()|//div[@class="zwxxgk_ndbgwz"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="infos"]//dt[contains(text(),"文") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="infos"]//dt[contains(text(),"索") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="infos"]//dt[contains(text(),"发布机构")]/following::dd[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '德州' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="warttext"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Message makes the failure diagnosable in logs (was a bare Exception).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99268'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DEZHOU"
    zt_provider = "dezhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the full-text container go into other_dicts.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省聊城市
def policy_liaochenglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Liaocheng city (Shandong) policy documents.

    The listing endpoint returns JSON with a page total; the first page fans
    out the remaining pages (list_json unchanged), and each JSON list item
    becomes one article task keyed by the link's filename.

    :param callmodel: platform callback context (fetched page + SQL row).
    :return: DealModel with before-queue page tasks and next-queue article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        # 'total' here is already a page count, not a record count.
        total_page = html_json['page']['total']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is passed through unchanged for every page.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['list']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['link']
            # rawid = filename without extension; raw string avoids escape warning.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99269'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = ''
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaochenglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Liaocheng (聊城) government policy list page.

    On the first page (page_index == 0) the total page count is read from the
    HTML and follow-up list tasks for pages 1..total_page-1 are queued (page 0
    is the current ``index.html``); on every page the article links are
    extracted into next-stage article tasks.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The two list types expose the page count differently in the HTML.
        if 'zwgk/zwdt/gsgg' == callmodel.sql_model.list_rawid:
            max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
        else:
            max_count = re.findall(r'index_(\d+)', para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[-1]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # The two list layouts differ only in the item container and the
        # node holding the publish date.
        if 'zwgk/zwdt/gsgg' == list_rawid:
            li_list = res.xpath('//div[@class="tab-content"]//ul/li')
            date_xpath = 'div/text()'
        else:
            li_list = res.xpath('//ul[@class="newslist-ul"]/li')
            date_xpath = 'span/text()'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.liaocheng.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99269'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath(date_xpath).extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_liaochengarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Liaocheng; parsing happens in the ETL step."""
    return DealModel()


def policy_liaochengarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Liaocheng (聊城) policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and write attachment info back to the
    task row.

    Raises:
        Exception: when the fulltext node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the title on the article page, falling back to the list-page title.
    title = ''.join(res.xpath('//div[@class="news-cont"]//h2/text()|//div[@class="zwgk-content-left-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        pub_date = ''.join(res.xpath('//div[@class="table-responsive"]//td[contains(text(),"发") and contains(text(),"期")]/following::td[1]/text()').extract()).strip()
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[@class="table-responsive"]//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="table-responsive"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="table-responsive"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="table-responsive"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '聊城' + organ

    fulltext_xpath = '//div[@id="Zoom"]|//div[@class="zwgk-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99269'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LIAOCHENG"
    zt_provider = "liaochenggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  山东省临沂市
def policy_linyilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Linyi (临沂) government policy list page.

    On the first page (page_index == 0) the total page count is read from the
    pager text ("N/M") and follow-up list tasks are queued; on every page the
    article links are extracted into next-stage article tasks. The HTML layout
    differs per ``list_rawid``, hence the two extraction branches.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            max_count = re.findall(r">\d+/(\d+)", para_dicts["data"]["1_1"]['html'])
            if not max_count:
                max_count = re.findall(r";&nbsp;\d+/(\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The site counts list pages backwards: /<total-page>.htm.
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        list_rawid = callmodel.sql_model.list_rawid
        # base_url only depends on the page index, so compute it once.
        if page_index == 0:
            base_url = f'http://www.linyi.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.linyi.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        if 'xw/gsgg' == list_rawid or 'gk/zcjd1/wzjd' == list_rawid or 'bmgk/fgwj/zcjd/fzr_zjjd' == list_rawid:
            li_list = res.xpath('//div[@class="lblist"]//ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div[1]/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99270'
                article_json["url"] = url
                article_json["title"] = li.xpath('div[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[contains(@class,"govnewslist")]//table/tr|//div[contains(@class,"govnewslist")]//ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href|div[1]/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99270'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[1]/a/text()|div[1]/a/text()').extract_first().strip()
                # The 'swwj' list keeps the date in a different column.
                if 'zt/zcwjfbpt/swwj' == list_rawid:
                    pub_date = li.xpath('td[2]/a/text()').extract_first().strip()
                else:
                    pub_date = li.xpath('td[3]/a/text()|div[2]/text()').extract_first().strip()
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_linyiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Linyi; parsing happens in the ETL step."""
    return DealModel()


def policy_linyiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Linyi (临沂) policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and write attachment info back to the
    task row.

    Raises:
        Exception: when the publish date or the fulltext node cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    # Prefer the title on the article page, falling back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="xxsy"]//td[contains(text(),"发布日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f"pub_date not found: {provider_url}")
    pub_no = ''.join(res.xpath('//div[@class="xxsy"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xxsy"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xxsy"]//td[contains(text(),"效力状态")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xxsy"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '临沂' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99270'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LINYI"
    zt_provider = "linyigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   山东省菏泽市
def policy_hezelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Heze (菏泽) policy list response (XML of ``<record>`` nodes).

    On page 1 the ``<totalrecord>`` count is read and follow-up list tasks are
    queued, each covering a [start, end] window of three 25-record pages; on
    every call the article links are extracted into next-stage tasks.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'<totalrecord>(\d+)</totalrecord>', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 25)

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Step by 3: each task requests 3 * 25 records, clamped to the
            # real record count.
            for page in range(1, total_page + 1, 3):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                start = (page - 1) * 25 + 1
                end = min((page + 2) * 25, max_count)
                dic = {"start": start, "end": end, "page_info": list_json['page_info']}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//record')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|td[2]/a/@href').extract_first()
            # Guard against records without a link (would TypeError below).
            if not href:
                continue
            base_url = 'http://www.heze.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99271'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|td[2]/a/span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/span/text()|td[4]/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hezelist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Heze (菏泽) HTML policy list page (``zfxxgk_zdgkc`` layout).

    On page 1 the total page count is read from the pager text and follow-up
    list tasks are queued; on every call the article links are extracted into
    next-stage tasks.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'共&nbsp;(\d+)&nbsp;页', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="zfxxgk_zdgkc"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Guard against items without a link (would TypeError below).
            if not href:
                continue
            base_url = 'http://www.heze.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99271'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('b/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hezearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Heze; parsing happens in the ETL step."""
    return DealModel()


def policy_hezearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Heze (菏泽) policy article page into ``policy_latest`` /
    ``policy_fulltext_latest`` rows, and write attachment info back to the
    task row.

    Raises:
        Exception: when the fulltext node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title on the article page, falling back to the list-page title.
    title = ''.join(res.xpath('//p[@class="con-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//td[contains(text(),"发文文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city name.
    if organ.startswith('市'):
        organ = '菏泽' + organ

    fulltext_xpath = '//div[@class="main-txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99271'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HEZE"
    zt_provider = "hezegovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省发展和改革委员会
def policy_fgwfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Fujian DRC (福建省发展和改革委员会) JSON policy list response.

    On page 1 the ``pageCount`` field is read and follow-up list tasks are
    queued; on every call the ``data`` entries are turned into next-stage
    article tasks.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99272'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwfujianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse an alternate Fujian DRC JSON policy list response (``docs`` /
    ``pagenum`` schema).

    The raw payload may contain literal control characters that break
    ``json.loads``, so they are stripped first. On page 1 follow-up list
    tasks are queued; on every call the ``docs`` entries are turned into
    next-stage article tasks.

    Returns:
        DealModel with pagination tasks in ``befor_dicts`` and article tasks
        in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n','').replace('\t','').replace('\r',''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['url']
            if 'htm' not in url:
                continue
            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99272'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Fujian DRC crawl.

    No follow-up tasks are scheduled at this stage, so an empty DealModel
    is returned unconditionally.
    """
    return DealModel()


def policy_fgwfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fujian Development and Reform Commission article pages.

    Extracts the title, document metadata and fulltext from the fetched HTML,
    stages rows for the ``policy_latest`` and ``policy_fulltext_latest``
    tables, and writes attachment info back onto the crawl row (``other_dicts``).

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[contains(@class,"article_title")]//text()|//div[@class="xl-content"]//h2//text()').extract()).strip()
    if '\n' in title:
        # Embedded newlines mean the xpath caught stray nodes; prefer the meta tag.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省…" organ names with the province.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"xl-article-nr")]|//div[contains(@class,"article_area")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99272'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWFUJIAN"
    zt_provider = "fgwfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container are recorded on the crawl
    # row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省工业和信息化厅 (Fujian Provincial Department of Industry and Information Technology)
def policy_gxtfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fujian Department of Industry and Information Technology.

    Parses the JSON list response; on the first page it fans out one list
    task per result page, and for every article link it queues a next-stage
    task carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Loop-invariant fields set once; each iteration only varies page_index.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                print(url)
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99273'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Fujian industry/IT department crawl.

    Nothing extra to schedule here; returns an empty DealModel.
    """
    return DealModel()


def policy_gxtfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fujian Department of Industry and Information Technology articles.

    Extracts the title, document metadata and fulltext from the fetched HTML,
    stages rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back onto the crawl row (``other_dicts``).

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[contains(@class,"article_title")]//text()|//div[@class="xl_con2"]//h2//text()').extract()).strip()
    if '\n' in title:
        # Embedded newlines mean the xpath caught stray nodes; prefer the meta tag.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省…" organ names with the province.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"xl-article-nr")]|//div[contains(@class,"article_area")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99273'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTFUJIAN"
    zt_provider = "gxtfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container are recorded on the crawl
    # row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省科学技术厅 (Fujian Provincial Department of Science and Technology)
def policy_kjtfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fujian Department of Science and Technology.

    Parses the JSON list response (after stripping literal control characters
    that break ``json.loads``); on the first page it fans out one list task per
    result page, and for every article link it queues a next-stage task
    carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The endpoint embeds raw newlines/tabs in string values; strip them
        # so the payload parses as JSON.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Loop-invariant fields set once; each iteration only varies page_index.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['url']
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99274'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Fujian science & technology crawl.

    Nothing extra to schedule here; returns an empty DealModel.
    """
    return DealModel()


def policy_kjtfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fujian Department of Science and Technology article pages.

    Extracts the title, publication date (trying several page layouts),
    document metadata and fulltext, stages rows for ``policy_latest`` /
    ``policy_fulltext_latest``, and writes attachment info back onto the
    crawl row (``other_dicts``).

    Raises:
        Exception: if no publication date or no fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="xl_big_bOX"]//h4/text()|//div[@id="conter"]//h4//text()|//h1//text()').extract()).strip()
    # BUGFIX: original read `if '附件下载':` — a constant truthy string, so the
    # meta tag always overrode the extracted title. The intent appears to be:
    # only fall back to the meta tag when the h4 xpath swallowed the
    # attachment-section header text.
    if '附件下载' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()
    # Try each known date location in turn until one yields a usable date.
    pub_date = ''
    for date_xpath in ('//p[@class="fl"]/text()',
                       '//span[contains(text(),"时间：")]/text()',
                       '//meta[@name="PubDate"]/@content'):
        pub_date_info = ''.join(res.xpath(date_xpath).extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        if pub_date:
            break
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[@class="syh_box"]//span[contains(text(),"备注/文号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="syh_box"]//span[contains(text(),"索")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="syh_box"]//span[contains(text(),"生成日期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="syh_box"]//span[contains(text(),"发布机构")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('省'):
        # Qualify bare "省…" organ names with the province.
        organ = '福建' + organ
    fulltext_xpath = '//div[@class="xl_tit_box"]|//div[@id="detailCont2"]|//div[contains(@class,"box1")]|//div[@class="article_area"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99274'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTFUJIAN"
    zt_provider = "kjtfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may live in the fulltext container or in any of several
    # dedicated attachment blocks; collect from all known locations.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'(//div[@id="fjxz"])')
                 + get_file_info(data, res, f'(//div[@class="xl_list1"])')
                 + get_file_info(data, res, f'(//div[@class="article_attachment"])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省教育厅 (Fujian Provincial Department of Education)
def policy_jytfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fujian Department of Education.

    Parses the JSON list response (after stripping literal control characters
    that break ``json.loads``); on the first page it fans out one list task per
    result page, and for every article link it queues a next-stage task
    carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The endpoint embeds raw newlines/tabs in string values; strip them
        # so the payload parses as JSON.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Loop-invariant fields set once; each iteration only varies page_index.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['url']
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99275'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Fujian education department crawl.

    Nothing extra to schedule here; returns an empty DealModel.
    """
    return DealModel()


def policy_jytfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fujian Department of Education article pages.

    Extracts the title, document metadata and fulltext from the fetched HTML,
    stages rows for ``policy_latest`` / ``policy_fulltext_latest``, and writes
    attachment info back onto the crawl row (``other_dicts``).

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="xl-nr-box"]//h2/text()|//div[@class="pad-y"]//h3//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list-crawl time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    # NOTE(review): the original also extracted 有效性 (legal_status) here but
    # never stored it in `data`; the dead extraction has been removed. If the
    # field is wanted, re-add the xpath and set data['legal_status'].
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省…" organ names with the province.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"xl-article-nr")]|//div[contains(@class,"detail_content_display")]|//div[@class="article_area"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99275'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTFUJIAN"
    zt_provider = "jytfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext container are recorded on the crawl
    # row so a later stage can download them.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省民政厅 (Fujian Provincial Department of Civil Affairs)
def policy_mztfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fujian Department of Civil Affairs.

    Parses the JSON list response (after stripping literal control characters
    that break ``json.loads``); on the first page it fans out one list task per
    result page, and for every article link it queues a next-stage task
    carrying url/title/pub_date in ``article_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The endpoint embeds raw newlines/tabs in string values; strip them
        # so the payload parses as JSON.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Parsed only to validate that list_json is well-formed JSON.
            list_json = json.loads(callmodel.sql_model.list_json)
            # Loop-invariant fields set once; each iteration only varies page_index.
            sql_dict["page"] = total_page
            sql_dict["list_json"] = callmodel.sql_model.list_json
            for page in range(1, total_page + 1):
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['url']
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99276'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Fujian civil-affairs department crawl.

    Nothing extra to schedule here; returns an empty DealModel.
    """
    return DealModel()


def policy_mztfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Fujian Department of Civil Affairs article pages.

    Only title/url/pub_date are extracted for this source (the site exposes no
    structured metadata table). Stages rows for ``policy_latest`` /
    ``policy_fulltext_latest`` and writes attachment info back onto the crawl
    row (``other_dicts``).

    Raises:
        Exception: if no fulltext container is found in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="sever"]//h2/text()|//div[@class="n-tit"]//h3/text()|//div[@class="xl_big_bOX"]//h4/text()').extract()).strip()
    if '\n' in title:
        # Embedded newlines mean the xpath caught stray nodes; prefer the meta tag.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Last resort: the title captured at list-crawl time.
        title = article_json['title'].strip()

    fulltext_xpath = '//div[contains(@class,"detp")]|//div[contains(@class,"box1")]|//div[contains(@class,"xl_tit_box")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99276'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTFUJIAN"
    zt_provider = "mztfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear inside the fulltext container or in the
    # dedicated download block (#fjxz); collect from both.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, f'(//div[@id="fjxz"])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省财政厅 (Fujian Provincial Department of Finance)
def policy_cztfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Finance (福建省财政厅).

    Parses the list response (JSON, sometimes wrapped in non-JSON text); on the
    first page, fans out tasks for every remaining list page; for each record,
    queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', '')
        try:
            html_json = json.loads(html)
        except ValueError:
            # Response is not pure JSON: extract the record array and the
            # page count directly from the raw text.
            html_json = dict()
            html_json['docs'] = json.loads(re.findall(r"\[\{.*?\}\]", html)[0])
            html_json['pagenum'] = re.findall(r'"pagenum":"(\d+)"', html)[0]
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['url']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99277'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['title']
            # Some records carry 'pubtime', others 'time'.
            pub_date = li.get('pubtime', '')
            if not pub_date:
                pub_date = li['time']
            article_json["pub_date"] = pub_date
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback: no post-fetch work here; parsing is handled by
    the matching ETL callback."""
    return DealModel()


def policy_cztfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fujian Provincial Department of Finance (福建省财政厅)
    article pages.

    Extracts title, document number, index number, written date and issuing
    organ plus the full text from the article HTML, then queues rows for
    ``policy_latest`` / ``policy_fulltext_latest`` and an attachment-info
    update of the task row.

    Raises:
        Exception: if the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="xl-nr-box"]//h2/text()|//div[@class="czxw-tit"]//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list time.
        title = article_json['title'].strip()
    if 'tp-table tp-pc' in html:
        # Newer layout: metadata lives in a table.
        pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        # Older layout: metadata appears as "label：value" list items.
        pub_no = ''.join(res.xpath('//div[@class="czxw_ml"]//li[contains(text(),"文") and contains(text(),"号")]//text()').extract()).strip()
        pub_no = pub_no.split('：')[-1].strip()
        index_no = ''.join(res.xpath('//div[@class="czxw_ml"]//li[contains(text(),"索")]//text()').extract()).strip()
        index_no = index_no.split('：')[-1].strip()
        written_date = ''.join(res.xpath('//div[@class="czxw_ml"]//li[contains(text(),"发文日期")]//text()').extract()).strip()
        written_date = written_date.split('：')[-1].strip()
        organ = ''.join(res.xpath('//div[@class="czxw_ml"]//li[contains(text(),"发文机构")]//text()').extract()).strip()
        organ = organ.split('：')[-1].strip()

    fulltext_xpath = '//div[@class="czxw-w"]|//div[contains(@class,"detailCont")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99277'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTFUJIAN"
    zt_provider = "cztfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the full text and the attachment box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="fj"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省人力资源和社会保障厅
def policy_rstfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Human Resources
    and Social Security (福建省人力资源和社会保障厅), JSON API variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['docpuburl']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99278'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstfujianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Human Resources
    and Social Security (福建省人力资源和社会保障厅), 'docs'-shaped JSON variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['url']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99278'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback: no post-fetch work here; parsing is handled by
    the matching ETL callback."""
    return DealModel()


def policy_rstfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fujian Provincial Department of Human Resources and
    Social Security (福建省人力资源和社会保障厅) article pages.

    Extracts title, document number, index number, written date, validity
    status and issuing organ plus the full text, then queues rows for
    ``policy_latest`` / ``policy_fulltext_latest`` and an attachment-info
    update of the task row.

    Raises:
        Exception: if the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="smgb-xl-tit"]//h2//text()|//div[@class="xl-nr-box"]//h2//text()').extract()).strip()
    if '\n' in title or not title:
        # Multi-line or missing heading: prefer the meta-tag title.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        # Last resort: the title captured at list time.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"xl-article-nr")]|//div[contains(@class,"xl_con1")]|//div[@class="article_area"]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99278'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTFUJIAN"
    zt_provider = "rstfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省农业农村厅
def policy_nynctfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Agriculture and
    Rural Affairs (福建省农业农村厅), JSON API variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['docpuburl']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99279'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctfujianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Agriculture and
    Rural Affairs (福建省农业农村厅), 'docs'-shaped JSON variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', ''))
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['url']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99279'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback: no post-fetch work here; parsing is handled by
    the matching ETL callback."""
    return DealModel()


def policy_nynctfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Fujian Provincial Department of Agriculture and Rural
    Affairs (福建省农业农村厅) article pages.

    Extracts title, document number, index number, written date and issuing
    organ plus the full text, then queues rows for ``policy_latest`` /
    ``policy_fulltext_latest`` and an attachment-info update of the task row.

    Raises:
        Exception: if the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="xl_tit"]//text()|//div[@class="nb_big_tit_Box nb_big_titt_Box"]//h4//text()').extract()).strip()
    if not title:
        # Fall back to the title captured at list time.
        title = article_json['title'].strip()
    # Metadata spans hold "label：value" text; keep only the value part.
    pub_no = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"文号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"索 引 号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"生成日期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"发布机构")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    if organ.startswith('省'):
        # Qualify bare provincial organ names with the province.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"detailCont")]|//div[@id="detailCont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99279'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTFUJIAN"
    zt_provider = "nynctfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省住房和城乡建设厅
def policy_zjtfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Housing and
    Urban-Rural Development (福建省住房和城乡建设厅), JSON API variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['docpuburl']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99280'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtfujianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Fujian Provincial Department of Housing and
    Urban-Rural Development (福建省住房和城乡建设厅), 'docs'-shaped JSON variant.

    On the first page, fans out tasks for every remaining list page; for each
    record, queues one article task keyed by a rawid derived from the URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', '')
        try:
            html_json = json.loads(html)
        except ValueError:
            # Response is not pure JSON: pull the record array and the page
            # count out of the raw text. SECURITY: parse with json.loads,
            # never eval(), on content fetched from a remote site.
            html_json = dict()
            html_json['docs'] = json.loads(re.findall(r"\[\{.*?\}\]", html)[0])
            html_json['pagenum'] = re.findall(r'"pagenum":"(\d+)"', html)[0]
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            url = li['url']
            if 'htm' not in url:
                # Skip non-HTML targets (attachments, external links, ...).
                continue
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99280'
            article_json = dict()
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['pubtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zjtfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback: no post-fetch work here; parsing is handled by
    the matching ETL callback."""
    return DealModel()


def policy_zjtfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for ZJTFUJIAN (Fujian provincial government) article pages.

    Extracts title/date/document-number metadata and the full text from the
    article HTML, queues rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and collects attachment links (from the page
    body, the "fj" attachment box, and inline ``link:``/``name:`` script
    pairs) into the task row's ``other_dicts`` column.

    Raises:
        Exception: if no publication date or no full text can be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']

    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="xl_tit"]//text()|//div[@class="xl_con2"]//h2//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布时间：")]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        # Alternate layout labels the date "日期：" instead of "发布时间：".
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"日期：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province name.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"detailCont")]|//div[contains(@class,"xl_con1")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99280'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZJTFUJIAN"
    zt_provider = "zjtfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, consistent with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: body links, the attachment div, and script-embedded pairs.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="fj"])')
    file_info3 = list()
    href_list = re.findall('link:"(.*?)"', html)
    name_list = re.findall('name:"(.*?)"', html)
    for href, name in zip(href_list, name_list):
        file_url = parse.urljoin(provider_url, href)
        file_info3.append({'url': file_url, 'name': name, 'pub_year': pub_year, 'keyid': lngid})
    file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Fujian Provincial Health Commission (福建省卫生健康委员会)
def policy_wjwfujianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fujian Provincial Health Commission.

    Parses the JSON list response; on the first page it fans out one list
    task per page, and it always emits one article task per document entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension, e.g. ".../abc.htm" -> "abc".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99281'
            article_json = {"url": url,
                            "title": li['doctitle'],
                            "pub_date": li['docreltime']}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwfujianlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Health Commission's alternate list API.

    The response may be strict JSON or loosely formatted text; in the latter
    case the doc array and page count are recovered with regexes.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = para_dicts["data"]["1_1"]['html'].replace('\n', '').replace('\t', '').replace('\r', '')
        try:
            html_json = json.loads(html)
        except json.JSONDecodeError:
            # Not valid JSON: salvage the docs array and the page count by regex.
            html_json = dict()
            html_json['docs'] = json.loads(re.findall(r"\[\{.*?\}\]", html)[0])
            html_json['pagenum'] = re.findall(r'"pagenum":"(\d+)"', html)[0]
        total_page = int(html_json['pagenum'])
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['docs']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li['url']
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99281'
            article_json = {"url": url,
                            "title": li['title'],
                            "pub_date": li['pubtime']}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wjwfujianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for WJWFUJIAN; nothing extra to schedule."""
    return DealModel()


def policy_wjwfujianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for WJWFUJIAN (Fujian Health Commission) article pages.

    Uses the pub_date captured on the list page, extracts the remaining
    metadata and the full text from the HTML, and queues rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus attachment info.

    Raises:
        Exception: if the full text cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="smgb-xl-tit"]//h2//text()|//div[@class="xl-nr-box"]//h2//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # Qualify bare "省..." organ names with the province name.
        organ = '福建' + organ

    fulltext_xpath = '//div[contains(@class,"detailCont")]|//div[contains(@class,"smgb-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99281'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WJWFUJIAN"
    zt_provider = "wjwfujiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, consistent with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Fuzhou City, Fujian (福建省福州市)
def policy_fuzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Fuzhou municipal government.

    Parses the JSON list response; on the first page it fans out one list
    task per page, and it always emits one article task per document entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99282'
            article_json = {"url": url,
                            "title": li['doctitle'],
                            "pub_date": li['docreltime']}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fuzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for FUZHOU; nothing extra to schedule."""
    return DealModel()


def policy_fuzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for FUZHOU (Fuzhou municipal government) article pages.

    Handles two page layouts ("index_box" info lists vs. "tp-table" metadata
    tables), extracts metadata and the full text, and queues rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus attachment info.

    Raises:
        Exception: if the full text cannot be found in either layout.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the page's own ArticleTitle meta tag over the list-page title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'class="index_box' in html:
        # "index_box" layout: metadata sits in <ul class="trt-row"> items.
        pub_no = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"发文字号")]/font/text()').extract()).strip()
        index_no = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"索")]/parent::li[1]/font/text()').extract()).strip()
        written_date = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"成文日期")]/font/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"效")]/em/text()').extract()).strip()
        subject = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"主题分类")]/font/text()').extract()).strip()
        organ = ''.join(res.xpath('//ul[@class="trt-row"]/li[contains(text(),"发文机关")]/font/text()').extract()).strip()
    else:
        # "tp-table" layout: metadata sits in table cells.
        pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        if not pub_no:
            pub_no = ''.join(res.xpath('//div[@class="xl_news_title"]//span[contains(text(),"文号：")]/text()').extract()).strip()
            pub_no = pub_no.split('文号：')[-1].strip()
        index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        subject = ''
        organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '福州' + organ

    fulltext_xpath = '//div[@class="word_con"]|//div[@class="article_area"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Broader fallback selectors for the many article-body variants.
        fulltext_xpath = '//div[contains(@class,"xl-article-nr")]|//div[contains(@class,"smgb-article")]|//div[contains(@class,"detailContent")]|//div[contains(@id,"detailContent")]|//div[@id="detailCon"]|//div[@id="docontcent"]|//div[@id="doc_content"]|//div[@class="box1"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99282'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FUZHOU"
    zt_provider = "fuzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, consistent with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: body links plus the several dedicated download boxes.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="smgb-fjxz-box"])')
    file_info3 = get_file_info(data, res, '(//div[@id="fjxz"])')
    file_info4 = get_file_info(data, res, '(//div[@class="xl_list mar-T5"])')
    file_info = file_info1 + file_info2 + file_info3 + file_info4
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Xiamen City, Fujian (福建省厦门市)
def policy_xmlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Xiamen municipal government.

    Supports two list sources: the "smartSearch" JSON API (paged by
    ``currentpage=``) and plain HTML list pages (paged by ``index_N.htm``).
    Page 0 fans out one list task per remaining page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears either as "pageCount: 'N" (HTML) or
        # '"pagecount":N' (JSON); default to a single page.
        max_count = re.findall(r"pageCount: '(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'"pagecount":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page (index 0) schedules the remaining pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'smartSearch' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentpage={page+1}"}
                else:
                    dic = {"page_info": f"index_{page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'smartSearch' in callmodel.sql_model.list_rawid:
            # JSON API response: one entry per row.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            for li in html_json['rows']:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                url = li['docPuburl']
                if 'htm' not in url:
                    continue
                # rawid is the file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99283'
                article_json = {"url": url,
                                "title": li['docTitle'],
                                "pub_date": li['PubDate']}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain HTML list page.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            for li in res.xpath('//div[@class="gl_list1"]//ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.xm.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99283'
                article_json = {"url": url,
                                "title": li.xpath('a/text()').extract_first().strip(),
                                "pub_date": li.xpath('span/text()').extract_first().strip()}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xmarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for XM (Xiamen); nothing extra to schedule."""
    return DealModel()


def policy_xmarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for XM (Xiamen municipal government) article pages.

    Uses the pub_date captured on the list page, extracts the remaining
    metadata and the full text from the HTML, and queues rows for
    ``policy_latest`` / ``policy_fulltext_latest`` plus attachment info.

    Raises:
        Exception: if the full text cannot be found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//p[@class="xxgk_title"]//text()|//div[@class="xl_tit"]//text()|//div[@class="gzk-xl-article"]//h1//text()').extract()).strip()
    if not title:
        # Fall back to the title captured on the list page.
        title = article_json['title'].strip()
    # Metadata labels in the "xl_box" header are split across characters, so
    # they are matched with two contains() clauses instead of the full label.
    pub_no = ''.join(res.xpath('//div[@class="xl_box"]//span[contains(text(),"文") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl_box"]//span[contains(text(),"索") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl_box"]//span[contains(text(),"有") and contains(text(),"性")]/parent::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl_box"]//span[contains(text(),"成") and contains(text(),"期")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl_box"]//span[contains(text(),"发") and contains(text(),"构")]/parent::div[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '厦门' + organ

    fulltext_xpath = '//div[@class="tit_hov"]|//div[@id="fontzoom"]|//div[@id="trsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99283'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XM"
    zt_provider = "xmgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, consistent with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Zhangzhou City, Fujian (福建省漳州市)
def policy_zhangzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Zhangzhou municipal government.

    Parses HTML list pages; the list identified by rawid
    '620416811908440000' (normative documents) has a different markup from
    the regular "mid-mj-list" pages. Page 1 fans out the remaining pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the ">共N页，" pager text; default to one page.
        max_count = re.findall(r">共(\d+)页，", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if '620416811908440000' in callmodel.sql_model.list_rawid:
            # Normative-document list layout.
            for li in res.xpath('//span[@id="normativeDocumentRequests"]/div'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('div[1]/a/@href').extract_first()
                if 'id=' in href:
                    # Query-style link: rawid is the "id" query parameter.
                    base_url = f'http://www.zhangzhou.gov.cn/{callmodel.sql_model.list_rawid}'
                    url = parse.urljoin(base_url, href)
                    rawid = re.findall(r'id=(.*?)&', url)[0]
                else:
                    # Path-style link: rawid is the file name without extension.
                    base_url = f'http://www.zhangzhou.gov.cn'
                    url = base_url + href
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if 'htm' not in url:
                    continue
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99284'
                article_json = {"url": url,
                                "title": li.xpath('div[1]/a/text()').extract_first().strip(),
                                "pub_date": ''}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Regular list layout.
            for li in res.xpath('//div[@class="mid-mj-list"]//ul/li'):
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('span[1]/a/@href').extract_first()
                if 'id=' in href and "&" in href:
                    # Query-style link: rawid is the "id" query parameter.
                    base_url = f'http://www.zhangzhou.gov.cn/{callmodel.sql_model.list_rawid}'
                    url = parse.urljoin(base_url, href)
                    rawid = re.findall(r'id=(.*?)&', url)[0]
                else:
                    # Path-style link: rawid is the file name without extension.
                    base_url = f'http://www.zhangzhou.gov.cn'
                    url = base_url + href
                    rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                if 'htm' not in url:
                    continue
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99284'
                article_json = {"url": url,
                                "title": li.xpath('span[1]/a/text()').extract_first().strip(),
                                "pub_date": li.xpath('span[2]/text()').extract_first().strip()}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhangzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Zhangzhou policies; no extra scheduling is needed."""
    return DealModel()


def policy_zhangzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhangzhou (漳州) policy articles (sub_db_id 99284).

    Parses the downloaded article HTML, extracts metadata (title, dates,
    document number, issuing organ, legal status) and the full text, and
    stages rows for the policy_latest / policy_fulltext_latest tables.
    Attachment links found on the page are written back to the source row
    via other_dicts.

    Raises:
        Exception: when no full-text container is found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: document-info table first, then page headings, finally the
    # title captured at list stage.
    title = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"标题")]/following::span[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="content-title"]//text()|//div[@class="title"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publication date: try several page layouts until one yields a value.
    if not pub_date:
        pub_date = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"印发日期")]/following::span[1]/text()').extract()).strip()
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"发布日期")]//text()').extract()).strip()
        pub_date = pub_date.split('：')[-1].strip()
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date = ''.join(res.xpath('//span[@class="fbsj-span"]/text()').extract()).strip()
        pub_year = pub_date[:4]

    # Two metadata layouts: a "wenjian-tabel" table or an "info" list.
    if 'wenjian-tabel' in html:
        pub_no = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"文号")]/following::span[1]/text()').extract()).strip()
        index_no = ''
        written_date = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"签发日期")]/following::span[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"效力状态")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="wenjian-tabel"]//span[contains(text(),"颁布单位")]/following::span[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"文") and contains(text(),"号")]//text()').extract()).strip()
        pub_no = pub_no.split('：')[-1].strip()
        index_no = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"索 引 号")]//text()').extract()).strip()
        index_no = index_no.split('：')[-1].strip()
        written_date = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"生成日期")]//text()').extract()).strip()
        written_date = written_date.split('：')[-1].strip()
        legal_status = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"有效性")]//text()').extract()).strip()
        legal_status = legal_status.split('：')[-1].strip()
        organ = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"发文机关")]//text()').extract()).strip()
        organ = organ.split('：')[-1].strip()
    # Qualify organ names like "市人民政府" with the city name.
    if organ.startswith('市'):
        organ = '漳州' + organ

    fulltext_xpath = '//div[@class="text-contain"]|//div[@id="Content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the record is retried rather than
        # stored without body text (was a bare `raise Exception`).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99284'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZHANGZHOU"
    zt_provider = "zhangzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the body and the metadata areas and
    # store them on the source row as other_dicts.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="wenjian-tabel"]|//ul[@class="info"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  福建省泉州市
def policy_quanzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Quanzhou (泉州) policy documents (sub_db_id 99285).

    On the first page (page_index 0) it schedules the remaining list
    pages; for every page it extracts each article's url / rawid /
    title / pub_date and queues it for the article stage.

    The source has two layouts: a JSON API (list_rawid contains
    "smartSearch") and a plain HTML list page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings: "\d" / "\." are invalid escape sequences in plain
        # string literals (SyntaxWarning on Python 3.12+).
        max_count = re.findall(r"pageCount: '(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'"pagecount":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-indexed here, so range(1, total_page) covers
            # every remaining page.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'smartSearch' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentpage={page+1}"}
                else:
                    dic = {"page_info": f"index_{page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'smartSearch' in callmodel.sql_model.list_rawid:
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['rows']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                url = li['docPuburl']
                if 'htm' not in url:
                    continue
                # rawid = file name without its extension
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99285'
                article_json["url"] = url
                article_json["title"] = li['docTitle']
                article_json["pub_date"] = li['docrelTime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="gl_list"]//ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.quanzhou.gov.cn/{callmodel.sql_model.list_rawid}/index.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99285'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_quanzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Quanzhou policies; no extra scheduling is needed."""
    return DealModel()


def policy_quanzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Quanzhou (泉州) policy articles (sub_db_id 99285).

    Parses the downloaded article HTML, extracts metadata (title, doc
    number, index number, dates, subject, issuing organ) and the full
    text, and stages rows for policy_latest / policy_fulltext_latest.
    Attachment links are written back to the source row via other_dicts.

    Raises:
        Exception: when no full-text container is found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # The original expression repeated the identical alternative
    # ('//div[@class="xl_tit"]//text()' twice); collapsed to one.
    title = ''.join(res.xpath('//div[@class="xl_tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if '\n' in title:
        # Multi-line page heading: use the clean meta-tag title instead.
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    # Two metadata layouts: a "js_tab2" table or an "xl_con4" block.
    if 'js_tab2' in html:
        pub_no = ''.join(res.xpath('//table[@id="js_tab2"]//td[contains(text(),"文 号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@id="js_tab2"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@id="js_tab2"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@id="js_tab2"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@id="js_tab2"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//div[@class="xl_con4"]//span[contains(text(),"文号")]/parent::div/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="xl_con4"]//span[contains(text(),"索 引 号")]/parent::div/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="xl_con4"]//span[contains(text(),"生成日期")]/parent::div/text()').extract()).strip()
        subject = ''
        organ = ''.join(res.xpath('//div[@class="xl_con4"]//span[contains(text(),"发布机构")]/parent::div/text()').extract()).strip()
    # Qualify organ names like "市人民政府" with the city name.
    if organ.startswith('市'):
        organ = '泉州' + organ

    fulltext_xpath = '//div[@id="main_xmb"]|//div[@class="TRS_Editor"]|//div[@class="xl_con1"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the record is retried rather than
        # stored without body text (was a bare `raise Exception`).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99285'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QUANZHOU"
    zt_provider = "quanzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the body and the attachment area and
    # store them on the source row as other_dicts.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@id="file_xmb"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省三明市
def policy_smlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Sanming (三明) policies — JSON API variant.

    On page 1 it schedules all list pages; for every page it queues each
    article's url / rawid / title / pub_date for the article stage
    (sub_db_id 99286).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # Raw string: "\." is an invalid escape in a plain literal
            # (SyntaxWarning on Python 3.12+). rawid = file name stem.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99286'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_smlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Sanming (三明) policies — second JSON API
    variant (lowercase keys, response may contain raw control chars).

    On page 1 it schedules all list pages; for every page it queues each
    article's url / rawid / title / pub_date for the article stage
    (sub_db_id 99286).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Strip unescaped control characters that would break json.loads.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n','').replace('\t','').replace('\r',''))
        total_page = html_json['pagecount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['docs']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['url']
            if 'htm' not in url:
                continue
            # Raw string: "\." is an invalid escape in a plain literal
            # (SyntaxWarning on Python 3.12+). rawid = file name stem.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99286'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_smarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Sanming policies; no extra scheduling is needed."""
    return DealModel()


def policy_smarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sanming (三明) policy articles (sub_db_id 99286).

    Parses the downloaded article HTML, extracts metadata (title, doc
    number, index number, dates, legal status, issuing organ) and the
    full text, and stages rows for policy_latest / policy_fulltext_latest.
    Attachment links are written back to the source row via other_dicts.

    Raises:
        Exception: when no full-text container is found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Fix: the second alternative used contains(class, ...) without "@",
    # which tests child <class> elements and can never match; it must
    # test the class attribute.
    title = ''.join(res.xpath('//div[@class="smgb-xl-tit"]//h2//text()|//div[contains(@class,"xl-nr-box")]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify organ names like "市人民政府" with the city name.
    if organ.startswith('市'):
        organ = '三明' + organ

    fulltext_xpath = '//div[contains(@class,"detailCont")]|//div[contains(@class,"smgb-article")]|//div[@class="article_area"]'
    fulltext = '\n'.join(res.xpath(fulltext_xpath).extract())
    if not fulltext:
        # Fail loudly with context so the record is retried rather than
        # stored without body text (was a bare `raise Exception`).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99286'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SM"
    zt_provider = "smgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the body and both attachment areas
    # and store them on the source row as other_dicts.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="smgb-fjxz-box"])')
    file_info3 = get_file_info(data, res, f'(//div[@class="fj"])')
    file_info = file_info1 + file_info2 + file_info3
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   福建省莆田市
def policy_putianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Putian (莆田) policies (JSON API).

    On page 1 it schedules all list pages; for every page it queues each
    article's url / rawid / title / pub_date for the article stage
    (sub_db_id 99287).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # Raw string: "\." is an invalid escape in a plain literal
            # (SyntaxWarning on Python 3.12+). rawid = file name stem.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99287'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_putianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Putian policies; no extra scheduling is needed."""
    return DealModel()


def policy_putianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Putian (莆田) policy articles (sub_db_id 99287).

    Parses the downloaded article HTML, extracts the title and full text
    (this source exposes no further structured metadata), and stages rows
    for policy_latest / policy_fulltext_latest. Attachment links are
    written back to the source row via other_dicts.

    Raises:
        Exception: when no full-text container is found in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Fix: the original used contains(class, ...) without "@", which
    # tests child <class> elements and can never match, so the title
    # always fell back to the list-stage value; it must test @class.
    title = ''.join(res.xpath('//h1[contains(@class,"xl-tit")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="TRS_Editor"]|//div[@id="detailCont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the record is retried rather than
        # stored without body text (was a bare `raise Exception`).
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99287'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PUTIAN"
    zt_provider = "putiangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the body and the attachment areas and
    # store them on the source row as other_dicts.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[contains(@class,"myzj_xl_list")]|//div[@class="fjxz"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  福建省南平市
def policy_nplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-stage callback for Nanping (南平) policies (HTML list pages).

    On page 1 it schedules the remaining list pages; for every page it
    queues each article's url / rawid / title / pub_date for the article
    stage (sub_db_id 99288). Entries whose href carries an "id=" query
    string derive rawid from the query; otherwise from the file name stem.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw strings: "\d" / "\." are invalid escape sequences in plain
        # string literals (SyntaxWarning on Python 3.12+).
        max_count = re.findall(r">共(\d+)页，", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="mid-mj-list"]//ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('span[1]/a/@href').extract_first()
            if 'id=' in href:
                base_url = f'https://www.np.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'id=(.*?)&', url)[0]
            else:
                base_url = f'https://www.np.gov.cn'
                url = base_url + href
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            if 'htm' not in url:
                continue
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99288'
            article_json["url"] = url
            article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Nanping: no post-download handling is
    needed, so an empty DealModel is returned as-is."""
    return DealModel()


def policy_nparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Nanping (南平) government policy article pages.

    Parses metadata (title, document number, index number, dates, organ,
    validity) and the full text out of the downloaded HTML, queues rows for
    the ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info back onto the source row via ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list time.
    title = ''.join(res.xpath('//div[@class="content-title"]//text()|//div[@class="title"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Each metadata field is rendered as "标签：值" in the info list, so split
    # on the full-width colon and keep the value part.
    pub_no = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"发文字号")]//text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"索 引 号")]//text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    written_date = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"生成日期")]//text()').extract()).strip()
    written_date = written_date.split('：')[-1].strip()
    legal_status = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"有效性")]//text()').extract()).strip()
    legal_status = legal_status.split('：')[-1].strip()
    organ = ''.join(res.xpath('//ul[@class="info"]//li[contains(text(),"发文机关")]//text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    # Qualify a bare "市xx" organ with the city name.
    if organ.startswith('市'):
        organ = '南平' + organ

    fulltext_xpath = '//div[@id="Content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the item so the platform flags/retries it instead of saving an
        # empty record.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99288'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NP"
    zt_provider = "npgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the article body and the metadata list,
    # then write them back to the source row for the download stage.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//ul[@class="info"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Longyan, Fujian Province (福建省龙岩市)
def policy_longyanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Longyan government policy.

    On the first page (page_index == 0) it derives the total page count and
    enqueues one source row per remaining list page; on every page it extracts
    the article links and enqueues them for the article stage.  Two list
    layouts exist: a JSON "smartSearch" API and plain static HTML pages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count appears either as "pageCount: 'N" (HTML) or as
        # '"pagecount":N' (JSON API); default to a single page.
        max_count = re.findall(r"pageCount: '(\d+)", para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'"pagecount":(\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count

        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # Page 1 is the page being processed right now, so only
            # total_page - 1 follow-up pages are enqueued (index_1.htm is
            # page 2 on this site; the API's currentpage is 1-based, hence
            # page + 1) — presumed from the URL scheme, confirm on site.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'smartSearch' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"currentpage={page+1}"}
                else:
                    dic = {"page_info": f"index_{page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        if 'smartSearch' in callmodel.sql_model.list_rawid:
            # JSON API response: article entries live under "rows".
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['rows']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                url = li['docPuburl']
                if 'htm' not in url:
                    continue
                # rawid is the html file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99289'
                article_json["url"] = url
                article_json["title"] = li['docTitle']
                article_json["pub_date"] = li['docrelTime']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Static HTML list page.
            res = Selector(text=para_dicts["data"]["1_1"]['html'])
            li_list = res.xpath('//div[@class="list_base list_large"]//ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a[1]/@href').extract_first()
                base_url = f'http://www.longyan.gov.cn/{callmodel.sql_model.list_rawid}/index.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99289'
                article_json["url"] = url
                article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_longyanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Longyan: nothing to process here, so an
    empty DealModel is returned as-is."""
    return DealModel()


def policy_longyanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Longyan (龙岩) government policy article pages.

    Parses metadata (title, document number, index number, dates, subject,
    organ) and the full text out of the downloaded HTML, queues rows for the
    ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info back onto the source row via ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list time.
    title = ''.join(res.xpath('//div[contains(@class,"article_title")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Some list entries carry no publish date; fall back to the page's 生成日期.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        # Without a publish date the record is unusable; fail the item so the
        # platform flags/retries it.
        raise Exception(f'pub_date not found: {provider_url}')
    pub_no = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"文 号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="tab2"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify a bare "市xx" organ with the city name.
    if organ.startswith('市'):
        organ = '龙岩' + organ

    fulltext_xpath = '//div[@class="TRS_Editor"]|//div[contains(@class,"article_content")]|//div[@id="trsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99289'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LONGYAN"
    zt_provider = "longyangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the article body and the attachment box,
    # then write them back to the source row for the download stage.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="article_attachment"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Ningde, Fujian Province (福建省宁德市)
def policy_ningdelist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ningde government policy (JSON API variant).

    The response is JSON with article entries under ``data`` and the total
    page count under ``pageCount``.  On the first page (page_index == 1) one
    source row per list page is enqueued; on every page the article links are
    enqueued for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is identical for every page; paging is presumably
                # driven from page_index by the request stage — TODO confirm.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # rawid is the html file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99290'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ningdelist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ningde government policy (second JSON layout).

    Same flow as :func:`policy_ningdelist_callback`, but the response embeds
    raw newlines/tabs (stripped before parsing), the page count lives under
    ``page.pagecount`` and the entries under ``datas``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The payload contains literal control characters that break
        # json.loads, so strip them first.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'].replace('\n','').replace('\t','').replace('\r',''))
        total_page = html_json['page']['pagecount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # list_json is identical for every page; paging is presumably
                # driven from page_index by the request stage — TODO confirm.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['datas']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            url = li['docpuburl']
            if 'htm' not in url:
                continue
            # rawid is the html file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99290'
            article_json["url"] = url
            article_json["title"] = li['doctitle']
            article_json["pub_date"] = li['docreltime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ningdearticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ningde: nothing to process here, so an
    empty DealModel is returned as-is."""
    return DealModel()


def policy_ningdearticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ningde (宁德) government policy article pages.

    Handles two page layouts ("syh_boxx" span-based metadata vs. the
    "tp-table tp-pc" table), extracts metadata and the full text, queues rows
    for the ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info back onto the source row via ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # BUG FIX: the original XPath used contains(class, ...) — without "@" this
    # refers to a child *element* named "class" (never present), so the
    # on-page title never matched and the list-time title was always used.
    title = ''.join(res.xpath('//h1[contains(@class,"xl_tit")]//text()|//div[contains(@class,"xl-nr-box")]//h2//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'syh_boxx' in html:
        # Layout 1: metadata rendered as "标签：值" spans inside div.syh_boxx;
        # split on the full-width colon and keep the value part.
        pub_no = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"文号：")]/text()').extract()).strip()
        pub_no = pub_no.split('：')[-1].strip()
        index_no = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"索")]/text()').extract()).strip()
        index_no = index_no.split('：')[-1].strip()
        written_date = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"生成日期：")]/text()').extract()).strip()
        written_date = written_date.split('：')[-1].strip()
        legal_status = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"有效性：")]/text()').extract()).strip()
        legal_status = legal_status.split('：')[-1].strip()
        organ = ''.join(res.xpath('//div[@class="syh_boxx"]//span[contains(text(),"发布机构：")]/text()').extract()).strip()
        organ = organ.split('：')[-1].strip()
    else:
        # Layout 2: metadata in a label/value table.
        pub_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"索 引 号")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="tp-table tp-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify a bare "市xx" organ with the city name.
    if organ.startswith('市'):
        organ = '宁德' + organ

    fulltext_xpath = '//div[contains(@class,"detailCont")]|//div[@id="detailCont"]|//div[@class="TRS_Editor"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the item so the platform flags/retries it instead of saving an
        # empty record.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99290'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NINGDE"
    zt_provider = "ningdegovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the article body and the attachment
    # containers, then write them back for the download stage.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="fj"]|//div[contains(@class,"fj_BIG_boX")]|//div[@class="qz-tab"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#  Huangpu District, Shanghai (上海市黄浦区)
def policy_shhuangpuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Huangpu district policy article pages.

    Extracts metadata from the ``tablecontent`` info list and the full text
    from ``#ivs_content``, queues rows for the ``policy_latest`` /
    ``policy_fulltext_latest`` tables, and records attachment info back onto
    the source row via ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title and publish date come from the list stage; only the remaining
    # metadata is read from the article page itself.
    title = article_json['title']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)
    pub_no = ''.join(res.xpath('//ul[@id="tablecontent"]//span[contains(text(),"发文字号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[@id="tablecontent"]//span[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@id="tablecontent"]//span[contains(text(),"主题分类")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@id="tablecontent"]//span[contains(text(),"发文机关")]/following::div[1]/text()').extract()).strip()
    # Qualify a bare "区xx" organ with the district name.
    if organ.startswith('区'):
        organ = '上海市黄浦' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the item so the platform flags/retries it instead of saving an
        # empty record.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99230'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SHHUANGPU"
    zt_provider = "shhuangpugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the article body and write them back for
    # the download stage.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


# Hongkou District, Shanghai (上海市虹口区)
def policy_shhkarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Hongkou district policy article pages.

    Two response shapes are handled: a JSON API payload (detected by the
    ``"custom":`` marker) whose metadata and body live under
    ``custom.data``, and a plain HTML page parsed with XPath.  Both paths
    queue rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables
    and record attachment info back onto the source row via ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()
    if '"custom":' in callmodel.para_dicts['data']['1_1']['html']:
        # JSON API branch: all metadata comes from custom.data fields.
        html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
        html = html_json['custom']['data']['infocontent']
        # infocontent may be null; normalize to an empty string.
        if not html:
            html = ''
        article_json = json.loads(callmodel.sql_model.article_json)
        title = article_json['title']
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
        provider_url = article_json['url']
        res = Selector(text=html)
        # Prefer the API title (newlines flattened); fall back to the
        # list-time title.
        title = html_json['custom']['data']['title'].replace("\n", ' ').replace("\r", ' ').strip()
        if not title:
            title = article_json['title'].strip()
        pub_no = html_json['custom']['data']['documentnumber']
        index_no = html_json['custom']['data']['identifier']
        subject = html_json['custom']['data']['govthemename']
        written_date = html_json['custom']['data']['infodate']
        organ = html_json['custom']['data']['publishername']
        # Qualify a bare "区xx" organ with the district name.
        if organ.startswith('区'):
            organ = '上海市虹口' + organ

        fulltext = html

        down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        sub_db_id = '99235'
        rawid = callmodel.sql_model.rawid
        lngid = BaseLngid().GetLngid(sub_db_id, rawid)
        product = "SHHK"
        zt_provider = "shhkgovpolicy"
        data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
        print(lngid)

        data['title'] = title
        data['provider_url'] = provider_url
        data['pub_date'] = clean_pubdate(pub_date)
        data['pub_year'] = pub_year
        data['pub_no'] = pub_no
        data['organ'] = organ
        data['index_no'] = index_no
        data['written_date'] = clean_pubdate(written_date)
        # data['impl_date'] = clean_pubdate(impl_date)
        # data['invalid_date'] = clean_pubdate(invalid_date)
        data['subject'] = subject
        # data['subject_word'] = subject_word
        # data['legal_status'] = legal_status

        save_data.append({'table': 'policy_latest', 'data': data})
        full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
        save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
        result.save_data = save_data

        # Attachments: links found in the rendered body plus the explicit
        # custom.data.attach list (URLs resolved against the article URL).
        file_info1 = get_file_info(data, res, f'(//body)')
        file_info2 = list()
        file_infos = html_json['custom']['data']['attach']
        for file_info in file_infos:
            purl = parse.urljoin(provider_url, file_info["attachurl"])
            dic = {'url': purl, 'name': file_info["attachname"], 'pub_year': pub_year, 'keyid': lngid}
            file_info2.append(dic)
        file_info = file_info1 + file_info2
        di_model_bef = DealUpdateModel()
        if file_info:
            di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
        else:
            di_model_bef.update.update({"other_dicts": "{}"})
        di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                                   "task_name": callmodel.sql_model.task_name})
        result.befor_dicts.update_list.append(di_model_bef)
    else:
        # Plain HTML branch: only title and full text are extracted; the
        # remaining metadata fields are left at their defaults.
        html = callmodel.para_dicts['data']['1_1']['html']
        article_json = json.loads(callmodel.sql_model.article_json)
        title = article_json['title']
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
        provider_url = article_json['url']
        res = Selector(text=html)
        # Prefer the on-page title; fall back to the list-time title.
        title = ''.join(res.xpath('//h1[@id="title"]/text()').extract()).strip()
        if not title:
            title = article_json['title'].strip()

        fulltext_xpath = '//div[@id="ivs_content"]'
        fulltext = res.xpath(fulltext_xpath).extract_first()
        # Abort the item when no full text is found rather than saving an
        # empty record.
        if not fulltext:
            raise Exception

        down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        sub_db_id = '99235'
        rawid = callmodel.sql_model.rawid
        lngid = BaseLngid().GetLngid(sub_db_id, rawid)
        product = "SHHK"
        zt_provider = "shhkgovpolicy"
        data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
        print(lngid)

        data['title'] = title
        data['provider_url'] = provider_url
        data['pub_date'] = clean_pubdate(pub_date)
        data['pub_year'] = pub_year
        # data['pub_no'] = pub_no
        # data['organ'] = organ
        # data['index_no'] = index_no
        # data['written_date'] = clean_pubdate(written_date)
        # data['impl_date'] = clean_pubdate(impl_date)
        # data['invalid_date'] = clean_pubdate(invalid_date)
        # data['subject'] = subject
        # data['subject_word'] = subject_word
        # data['legal_status'] = legal_status

        save_data.append({'table': 'policy_latest', 'data': data})
        full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
        save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
        result.save_data = save_data

        # Attachments: links from the article body and the NEWS paragraph.
        file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
        file_info2 = get_file_info(data, res, f'(//p[@id="NEWS"])')
        file_info = file_info1 + file_info2
        di_model_bef = DealUpdateModel()
        if file_info:
            di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
        else:
            di_model_bef.update.update({"other_dicts": "{}"})
        di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                                   "task_name": callmodel.sql_model.task_name})
        result.befor_dicts.update_list.append(di_model_bef)
    return result


#上海市浦东新区
def policy_pudongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Shanghai Pudong New Area policy articles.

    Parses the crawled article HTML carried in ``callmodel``, extracts
    metadata (title, publish date, document number, index number, issuing
    organ) and the full text, and packages them as rows destined for the
    ``policy_latest`` and ``policy_fulltext_latest`` tables. Attachment
    info found in the full-text node is stored back on the task row via
    ``other_dicts`` so downstream steps can fetch the files.

    :param callmodel: callback model exposing ``para_dicts`` (raw HTML under
        ``['data']['1_1']['html']``) and ``sql_model`` (``rawid``,
        ``task_tag``, ``task_name``, ``article_json``).
    :return: populated ``EtlDealModel``.
    :raises Exception: when the full-text node cannot be located in the HTML.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    provider_url = article_json['url']
    res = Selector(text=html)

    # Metadata cells live in the table with id "tp"; each label cell is
    # followed by a <span> holding the value.
    pub_no = ''.join(res.xpath('//table[@id="tp"]//td[contains(text(),"文件编号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@id="tp"]//td[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@id="tp"]//td[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ to "区..."; restore the full
    # "上海市浦东新区..." prefix so the stored value is unambiguous.
    if organ.startswith('区'):
        organ = '上海市浦东新' + organ

    fulltext_xpath = '//div[@id="ivs_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context (instead of a bare ``raise Exception``)
        # so the scheduler log identifies which record lacked full text.
        raise Exception(
            f"fulltext not found for rawid={callmodel.sql_model.rawid} "
            f"(xpath={fulltext_xpath})")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99240'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PUDONG"
    zt_provider = "pudonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    # ``pub_date`` was already normalised by clean_pubdate above; the
    # original code cleaned it a second time redundantly.
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist any attachment info found inside the full-text node on the
    # originating task row (empty JSON object when nothing was found).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result
