import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse
import base64
import requests

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, DealUpdateModel, \
    OperatorSqlModel, DealItemModel, \
    EtlDealModel, PolicyListModel, PolicyArticleModel

__all__ = [
    "policy_fzggwcqlist_callback",
    "policy_fzggwcqarticle_callback",
    "policy_fzggwcqarticle_etl_callback",
    "policy_jjxxwcqlist_callback",
    "policy_jjxxwcqarticle_callback",
    "policy_jjxxwcqarticle_etl_callback",
    "policy_kjjcqlist_callback",
    "policy_kjjcqarticle_callback",
    "policy_kjjcqarticle_etl_callback",
    "policy_jwcqlist_callback",
    "policy_jwcqarticle_callback",
    "policy_jwcqarticle_etl_callback",
    "policy_mzjcqlist_callback",
    "policy_mzjcqarticle_callback",
    "policy_mzjcqarticle_etl_callback",
    "policy_czjcqlist_callback",
    "policy_czjcqarticle_callback",
    "policy_czjcqarticle_etl_callback",
    "policy_rlsbjcqlist_callback",
    "policy_rlsbjcqarticle_callback",
    "policy_rlsbjcqarticle_etl_callback",
    "policy_nyncwcqlist_callback",
    "policy_nyncwcqarticle_callback",
    "policy_nyncwcqarticle_etl_callback",
    "policy_zfcxjwcqlist_callback",
    "policy_zfcxjwcqarticle_callback",
    "policy_zfcxjwcqarticle_etl_callback",
    "policy_wsjkwcqlist_callback",
    "policy_wsjkwcqarticle_callback",
    "policy_wsjkwcqarticle_etl_callback",
    "policy_cqyzlist_callback",
    "policy_cqyzarticle_callback",
    "policy_cqyzarticle_etl_callback",
    "policy_wzlist_callback",
    "policy_wzarticle_callback",
    "policy_wzarticle_etl_callback",
    "policy_fllist_callback",
    "policy_flarticle_callback",
    "policy_flarticle_etl_callback",
    "policy_ddklist_callback",
    "policy_ddkarticle_callback",
    "policy_ddkarticle_etl_callback",
    "policy_cqjblist_callback",
    "policy_cqjbarticle_callback",
    "policy_cqjbarticle_etl_callback",
    "policy_cqspblist_callback",
    "policy_cqspbarticle_callback",
    "policy_cqspbarticle_etl_callback",
    "policy_cqjlplist_callback",
    "policy_cqjlparticle_callback",
    "policy_cqjlparticle_etl_callback",
    "policy_cqnalist_callback",
    "policy_cqnaarticle_callback",
    "policy_cqnaarticle_etl_callback",
    "policy_beibeilist_callback",
    "policy_beibeiarticle_callback",
    "policy_beibeiarticle_etl_callback",
    "policy_cqqjlist_callback",
    "policy_cqqjarticle_callback",
    "policy_cqqjarticle_etl_callback",
    "policy_dazulist_callback",
    "policy_dazuarticle_callback",
    "policy_dazuarticle_etl_callback",
    "policy_ybqlist_callback",
    "policy_ybqarticle_callback",
    "policy_ybqarticle_etl_callback",
    "policy_cqbnlist_callback",
    "policy_cqbnarticle_callback",
    "policy_cqbnarticle_etl_callback",
    "policy_qianjianglist_callback",
    "policy_qianjiangarticle_callback",
    "policy_qianjiangarticle_etl_callback",
    "policy_cqcslist_callback",
    "policy_cqcsarticle_callback",
    "policy_cqcsarticle_etl_callback",
    "policy_jiangjinlist_callback",
    "policy_jiangjinarticle_callback",
    "policy_jiangjinarticle_etl_callback",
    "policy_hclist_callback",
    "policy_hcarticle_callback",
    "policy_hcarticle_etl_callback",
    "policy_cqyclist_callback",
    "policy_cqycarticle_callback",
    "policy_cqycarticle_etl_callback",
    "policy_cqnclist_callback",
    "policy_cqncarticle_callback",
    "policy_cqncarticle_etl_callback",
    "policy_bishanlist_callback",
    "policy_bishanarticle_callback",
    "policy_bishanarticle_etl_callback",
    "policy_cqstllist_callback",
    "policy_cqstlarticle_callback",
    "policy_cqstlarticle_etl_callback",
    "policy_cqtnlist_callback",
    "policy_cqtnarticle_callback",
    "policy_cqtnarticle_etl_callback",
    "policy_rongchanglist_callback",
    "policy_rongchangarticle_callback",
    "policy_rongchangarticle_etl_callback",
    "policy_cqkzlist_callback",
    "policy_cqkzarticle_callback",
    "policy_cqkzarticle_etl_callback",
    "policy_cqlplist_callback",
    "policy_cqlparticle_callback",
    "policy_cqlparticle_etl_callback",
    "policy_cqwllist_callback",
    "policy_cqwlarticle_callback",
    "policy_cqwlarticle_etl_callback",
    "policy_cqcklist_callback",
    "policy_cqckarticle_callback",
    "policy_cqckarticle_etl_callback",
    "policy_cqfdlist_callback",
    "policy_cqfdarticle_callback",
    "policy_cqfdarticle_etl_callback",
    "policy_cqsdjlist_callback",
    "policy_cqsdjarticle_callback",
    "policy_cqsdjarticle_etl_callback",
    "policy_zhongxianlist_callback",
    "policy_zhongxianarticle_callback",
    "policy_zhongxianarticle_etl_callback",
    "policy_yunyanglist_callback",
    "policy_yunyangarticle_callback",
    "policy_yunyangarticle_etl_callback",
    "policy_cqfjlist_callback",
    "policy_cqfjarticle_callback",
    "policy_cqfjarticle_etl_callback",
    "policy_cqwslist_callback",
    "policy_cqwsarticle_callback",
    "policy_cqwsarticle_etl_callback",
    "policy_cqwxlist_callback",
    "policy_cqwxarticle_callback",
    "policy_cqwxarticle_etl_callback",
    "policy_cqszxlist_callback",
    "policy_cqszxarticle_callback",
    "policy_cqszxarticle_etl_callback",
    "policy_cqxslist_callback",
    "policy_cqxsarticle_callback",
    "policy_cqxsarticle_etl_callback",
    "policy_youyanglist_callback",
    "policy_youyangarticle_callback",
    "policy_youyangarticle_etl_callback",
    "policy_psxlist_callback",
    "policy_psxarticle_callback",
    "policy_psxarticle_etl_callback",
    "policy_fgwsclist_callback",
    "policy_fgwscarticle_callback",
    "policy_fgwscarticle_etl_callback",
    "policy_jxtsclist_callback",
    "policy_jxtscarticle_callback",
    "policy_jxtscarticle_etl_callback",
    "policy_kjtsclist_callback",
    "policy_kjtscarticle_callback",
    "policy_kjtscarticle_etl_callback",
    "policy_edusclist_callback",
    "policy_eduscarticle_callback",
    "policy_eduscarticle_etl_callback",
    "policy_mztsclist_callback",
    "policy_mztscarticle_callback",
    "policy_mztscarticle_etl_callback",
    "policy_cztsclist_callback",
    "policy_cztscarticle_callback",
    "policy_cztscarticle_etl_callback",
    "policy_rstsclist_callback",
    "policy_rstscarticle_callback",
    "policy_rstscarticle_etl_callback",
    "policy_nynctsclist_callback",
    "policy_nynctscarticle_callback",
    "policy_nynctscarticle_etl_callback",
    "policy_jstsclist_callback",
    "policy_jstscarticle_callback",
    "policy_jstscarticle_etl_callback",
    "policy_wsjkwsclist_callback",
    "policy_wsjkwscarticle_callback",
    "policy_wsjkwscarticle_etl_callback",
    "policy_zglist_callback",
    "policy_zgarticle_callback",
    "policy_zgarticle_etl_callback",
    "policy_panzhihualist_callback",
    "policy_panzhihuaarticle_callback",
    "policy_panzhihuaarticle_etl_callback",
    "policy_luzhoulist_callback",
    "policy_luzhouarticle_callback",
    "policy_luzhouarticle_etl_callback",
    "policy_deyanglist_callback",
    "policy_deyangarticle_callback",
    "policy_deyangarticle_etl_callback",
    "policy_mylist_callback",
    "policy_myarticle_callback",
    "policy_myarticle_etl_callback",
    "policy_cngylist_callback",
    "policy_cngylist1_callback",
    "policy_cngyarticle_callback",
    "policy_cngyarticle_etl_callback",
    "policy_suininglist_callback",
    "policy_suiningarticle_callback",
    "policy_suiningarticle_etl_callback",
    "policy_neijianglist_callback",
    "policy_neijianglist1_callback",
    "policy_neijiangarticle_callback",
    "policy_neijiangarticle_etl_callback",
    "policy_leshanlist_callback",
    "policy_leshanlist1_callback",
    "policy_leshanlist2_callback",
    "policy_leshanarticle_callback",
    "policy_leshanarticle_etl_callback",
    "policy_leshanarticle1_callback",
    "policy_leshanarticle1_etl_callback",
    "policy_nanchonglist_callback",
    "policy_nanchonglist1_callback",
    "policy_nanchongarticle_callback",
    "policy_nanchongarticle_etl_callback",
    "policy_mslist_callback",
    "policy_msarticle_callback",
    "policy_msarticle_etl_callback",
    "policy_yibinlist_callback",
    "policy_yibinarticle_callback",
    "policy_yibinarticle_etl_callback",
    "policy_guanganlist_callback",
    "policy_guanganarticle_callback",
    "policy_guanganarticle_etl_callback",
    "policy_dazhoulist_callback",
    "policy_dazhoulist1_callback",
    "policy_dazhouarticle_callback",
    "policy_dazhouarticle_etl_callback",
    "policy_yaanlist_callback",
    "policy_yaanarticle_callback",
    "policy_yaanarticle_etl_callback",
    "policy_cnbzlist_callback",
    "policy_cnbzarticle_callback",
    "policy_cnbzarticle_etl_callback",
    "policy_ziyanglist_callback",
    "policy_ziyangarticle_callback",
    "policy_ziyangarticle_etl_callback",
    "policy_abazhoulist_callback",
    "policy_abazhouarticle_callback",
    "policy_abazhouarticle_etl_callback",
    "policy_gzzlist_callback",
    "policy_gzzlist1_callback",
    "policy_gzzarticle_callback",
    "policy_gzzarticle_etl_callback",
    "policy_lszlist_callback",
    "policy_lszarticle_callback",
    "policy_lszarticle_etl_callback",
    "policy_yndrcynlist_callback",
    "policy_yndrcynarticle_callback",
    "policy_yndrcynarticle_etl_callback",
    "policy_gxtynlist_callback",
    "policy_gxtynarticle_callback",
    "policy_gxtynarticle_etl_callback",
    "policy_kjtynlist_callback",
    "policy_kjtynarticle_callback",
    "policy_kjtynarticle_etl_callback",
    "policy_jytynlist_callback",
    "policy_jytynarticle_callback",
    "policy_jytynarticle_etl_callback",
    "policy_ynmzynlist_callback",
    "policy_ynmzynarticle_callback",
    "policy_ynmzynarticle_etl_callback",
    "policy_cztynlist_callback",
    "policy_cztynarticle_callback",
    "policy_cztynarticle_etl_callback",
    "policy_hrssynlist_callback",
    "policy_hrssynarticle_callback",
    "policy_hrssynarticle_etl_callback",
    "policy_nyncynlist_callback",
    "policy_nyncynarticle_callback",
    "policy_nyncynarticle_etl_callback",
    "policy_zfcxjstynlist_callback",
    "policy_zfcxjstynarticle_callback",
    "policy_zfcxjstynarticle_etl_callback",
    "policy_ynswsjkwynlist_callback",
    "policy_ynswsjkwynarticle_callback",
    "policy_ynswsjkwynarticle_etl_callback",
    "policy_kmlist_callback",
    "policy_kmarticle_callback",
    "policy_kmarticle_etl_callback",
    "policy_qjlist_callback",
    "policy_qjarticle_callback",
    "policy_qjarticle_etl_callback",
    "policy_yuxilist_callback",
    "policy_yuxiarticle_callback",
    "policy_yuxiarticle_etl_callback",
    "policy_ztlist_callback",
    "policy_ztarticle_callback",
    "policy_ztarticle_etl_callback",
    "policy_baoshanlist_callback",
    "policy_baoshanarticle_callback",
    "policy_baoshanarticle_etl_callback",
    "policy_lijianglist_callback",
    "policy_lijiangarticle_callback",
    "policy_lijiangarticle_etl_callback",
    "policy_puershilist_callback",
    "policy_puershiarticle_callback",
    "policy_puershiarticle_etl_callback",
    "policy_lincanglist_callback",
    "policy_lincangarticle_callback",
    "policy_lincangarticle_etl_callback",
    "policy_dhlist_callback",
    "policy_dharticle_callback",
    "policy_dharticle_etl_callback",
    "policy_nujianglist_callback",
    "policy_nujiangarticle_callback",
    "policy_nujiangarticle_etl_callback",
    "policy_dalilist_callback",
    "policy_daliarticle_callback",
    "policy_daliarticle_etl_callback",
    "policy_cxzlist_callback",
    "policy_cxzarticle_callback",
    "policy_cxzarticle_etl_callback",
    "policy_hhlist_callback",
    "policy_hharticle_callback",
    "policy_hharticle_etl_callback",
    "policy_ynwslist_callback",
    "policy_ynwsarticle_callback",
    "policy_ynwsarticle_etl_callback",
    "policy_xsbnlist_callback",
    "policy_xsbnarticle_callback",
    "policy_xsbnarticle_etl_callback",


    "policy_chengduarticle_etl_callback",
    "policy_diqingarticle_etl_callback",
]


def clean_pubdate(value):
    """Normalize a free-form date string to an 8-digit ``YYYYMMDD`` string.

    All non-digit characters are stripped, then the result is truncated to
    8 characters and right-padded with ``'0'``. An implausible month
    (> 12) zeroes out both month and day; an implausible day (> 31)
    zeroes out the day only.

    :param value: raw date text scraped from a page (may be None/empty).
    :return: ``'YYYYMMDD'`` string, or ``''`` for falsy input.
    """
    if not value:
        return ''
    # Raw string avoids the invalid-escape-sequence deprecation for '\D'.
    value = re.sub(r'\D', '', value)
    value = value[:8].ljust(8, '0')
    if int(value[4:6]) > 12:
        # Month out of range -> keep only the year part.
        value = value[:4] + '0000'
    if int(value[6:]) > 31:
        # Day out of range -> keep only year + month.
        value = value[:6] + '00'
    return value


def cleaned(value):
    """Collapse a scraped value into a single stripped string.

    A list of text fragments is stripped element-wise and joined with
    single spaces (then stripped again); a plain string is stripped.

    :param value: string, list of strings, or any falsy value.
    :return: cleaned string; ``""`` for falsy input.
    """
    if not value:
        return ""
    # isinstance is the idiomatic type check (covers list subclasses too).
    if isinstance(value, list):
        return ' '.join(part.strip() for part in value).strip()
    return value.strip()


def judge_url(url):
    """Return True when *url* should be rejected as an attachment link.

    Rejects over-long URLs, URLs without a real path, javascript/mailto/
    data links, known external domains, URLs damaged by full-width
    punctuation, and links whose final path segment looks like a web page
    rather than a downloadable file.
    """
    if len(url) > 500:
        return True
    if '/' not in url.replace('//', ''):
        return True

    blocked_fragments = (
        'mailt', 'data:image/', 'javascript:', '#', 'weixin.qq',
        '.baidu', '。', '@163', '.cn/）', '8080）', 'cn）',
        'cn，', 'com，', 'cn,', 'haosou.', 'www.so.', 'file://',
        'C:', 'baike.soso', 'weibo.com', 'baike.sogou', 'html）',
        'shtml）', 'phtml）', 'wx.qq.', 'bing.com',
    )
    if any(fragment in url for fragment in blocked_fragments):
        return True

    blocked_suffixes = (
        '/', '.net', '.asp', '.shtml',
        '/share', '.exe', '.xml',
        'pdf}', 'jpg}',
    )
    if url.endswith(blocked_suffixes):
        return True

    last_segment = url.split('/')[-1].lower()
    if not last_segment:
        return True
    page_suffixes = (
        '.htm', '.shtml', '.jhtml', '.org',
        'xhtml', '.phtml', '.cn', '.com',
        '.html', '.mht', '.html%20',
    )
    if last_segment.endswith(page_suffixes):
        return True
    # Short .jsp names (query-less dynamic pages) are not downloads.
    if '.jsp' in last_segment and len(last_segment.split('.', 1)[1]) < 7:
        return True

    return False


def get_file_info(data, res, xpath):
    """Collect attachment and image links inside the fulltext node.

    Scans *xpath* for ``<a href>`` links and any ``src`` attributes,
    resolves each against the article URL, drops unwanted links via
    ``judge_url`` and duplicates, and returns a list of dicts carrying
    url/name/pub_year/keyid for later download.
    """
    page_url = data['provider_url']
    pub_year = data['pub_year']
    keyid = data['keyid']
    seen_urls = list()
    file_info = list()

    # Anchor tags: the link text becomes the attachment name.
    for tag in res.xpath(f'{xpath}//a'):
        href = tag.xpath('@href').extract_first()
        if not href or not href.strip():
            continue
        href = href.strip()
        try:
            full_url = parse.urljoin(page_url, href)
        except:
            continue
        if judge_url(full_url) or full_url in seen_urls:
            continue
        seen_urls.append(full_url)
        link_text = ''.join(tag.xpath('.//text()').extract()).strip()
        file_info.append({'url': full_url, 'name': link_text,
                          'pub_year': pub_year, 'keyid': keyid})

    # Embedded resources (images etc.): the raw src doubles as the name.
    for src in res.xpath(f'{xpath}//*/@src').extract():
        if not src.strip():
            continue
        src = src.strip()
        img_url = parse.urljoin(page_url, src)
        if judge_url(img_url) or img_url in seen_urls:
            continue
        seen_urls.append(img_url)
        file_info.append({'url': img_url, 'name': src,
                          'pub_year': pub_year, 'keyid': keyid})
    return file_info


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task-row dict before re-insertion.

    Removes the auto-generated / crawler-state columns so the remaining
    keys can be inserted as a fresh task row. Mutates *sql_dict* in place
    and also returns it. Missing keys are ignored (the previous version
    raised KeyError), making the helper safe for partially-populated rows.
    """
    for key in ('id', 'update_time', 'create_time', 'null_dicts',
                'err_msg', 'other_dicts', 'state', 'failcount'):
        sql_dict.pop(key, None)
    return sql_dict


def init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider):
    """Build the base metadata record shared by every policy article.

    Constant fields (sub_db, provider, source_type, vision, country,
    language, ...) are fixed for this crawler; ``latest_date`` is the
    date portion of the ``YYYYMMDD_HHMMSS`` batch string.
    """
    return {
        'rawid': rawid,
        'rawid_mysql': rawid,
        'lngid': lngid,
        'keyid': lngid,
        'product': product,
        'sub_db': 'POLICY',
        'sub_db_id': sub_db_id,
        'provider': 'CNGOV',
        'zt_provider': zt_provider,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'vision': '1',
        'is_deprecated': '0',
        'country': 'CN',
        'language': 'ZH',
    }


def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build the fulltext-table record paired with a policy article.

    The full text is stored inline as HTML under ``fulltext_txt``; the
    address/size fields stay empty and the page count is fixed to 1.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '16',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


#   重庆市发展和改革委员会 (Chongqing Municipal Development and Reform Commission)
def policy_fzggwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for fzggw.cq.gov.cn.

    Parses one list page: on the first page it fans out one task row per
    remaining page (befor_dicts), then extracts every article link on the
    current page and queues it for the article stage (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The total page count is embedded in a createPage(<n>, ...) JS call.
        max_count = re.findall('createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: clone this row once per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range stops at total_page - 1; this assumes pages
            # are 0-indexed so page 0 is the one being parsed now -- confirm.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # Two list layouts exist on this site: a <ul> listing and a table.
        li_list = res.xpath('//div[@class="gkm-crbox"]/ul/li|//table[@class="zcwjk-list"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the row to the next pipeline stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|div/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://fzggw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name of the detail page without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99480'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/p[@class="tit"]/text()|div/a/text()|a/text()').extract_first().strip()
            if 'zfxxgk/fdzdgknr/ghxx' == callmodel.sql_model.list_rawid:
                # The planning-info column keeps the date in the second span.
                article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
            else:
                article_json["pub_date"] = li.xpath('span/text()|td[2]/a/p[@class="info"]/span[@class="time"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fzggwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing extra to schedule, return an empty DealModel."""
    return DealModel()


def policy_fzggwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for fzggw.cq.gov.cn article pages.

    Extracts policy metadata and the full text from the detail HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    records attachment links into the task row's ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title from the list page; overwritten below by the page's own meta tag.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the page's <meta ArticleTitle>; fall back to the list title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata table cells: 发文字号 = document number, 索引号 = index number,
    # 主题分类 = subject category, 成文日期 = written date, 发布机构 = issuing organ.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//span[contains(text(),"有") and contains(text(),"性")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # if not organ:
    #     organ_info = ''.join(res.xpath('//p[@class="easysite-news-describe"]//text()').extract()).strip()
    #     organ = organ_info.split('来源：')[-1].split('')[0].strip()
    # Prefix the municipality name for organs written as "市..." (City ...).
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[@class="zwxl-article"]|//div[@class="zcwjk-xlcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No known fulltext container -> fail the task so it can be retried.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99480'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FZGGWCQ"
    zt_provider = "fzggwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    # Debug/progress output of the generated record id.
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市经济和信息化委员会 (Chongqing Municipal Commission of Economy and Information Technology)
def policy_jjxxwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jjxxw.cq.gov.cn.

    Parses one list page: on the first page it fans out one task row per
    remaining page (befor_dicts), then extracts every article link on the
    current page and queues it for the article stage (next_dicts).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The total page count is embedded in a createPage(<n>, ...) JS call.
        max_count = re.findall('createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: clone this row once per remaining page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): range stops at total_page - 1; this assumes pages
            # are 0-indexed so page 0 is the one being parsed now -- confirm.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'xxgkzcjd' in callmodel.sql_model.list_rawid:
        # Several list layouts exist on this site: three <ul> styles and a table.
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="right-list"]/li|//div[@class="zsj-l_con"]/ul/li|//table[@class="zcwjk-list"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            # Promote the row to the next pipeline stage's tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://jjxxw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name of the detail page without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99481'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/p[@class="tit"]/text()|a/text()').extract_first().strip()
            # Skip rows that carry no publication date at all.
            if not li.xpath('span[@class="l-sj"]/text()|span/text()').extract_first():
                continue
            article_json["pub_date"] = li.xpath('span[@class="l-sj"]/text()|span/text()|td[2]/a/p[@class="info"]/span[@class="time"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jjxxwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback: nothing extra to schedule, return an empty DealModel."""
    return DealModel()


def policy_jjxxwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jjxxw.cq.gov.cn article pages.

    Extracts policy metadata and the full text from the detail HTML,
    emits rows for ``policy_latest`` / ``policy_fulltext_latest`` and
    records attachment links into the task row's ``other_dicts``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    # Title from the list page; overwritten below by the page's own meta tag.
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the page's <meta ArticleTitle>; fall back to the list title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Repair a missing/invalid list-page date from the detail page's
    # 发布日期 (publication date) table cell.
    if not pub_date or '0000' in pub_date:
        pub_date_info = res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布日期")]/following::td[1]/text()').extract_first()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    # Metadata table cells: 发文字号 = document number, 索引号 = index number,
    # 主题分类 = subject category, 成文日期 = written date, 发布机构 = issuing organ.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    # legal_status = ''.join(res.xpath('//span[contains(text(),"有") and contains(text(),"性")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Prefix the municipality name for organs written as "市..." (City ...).
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[@class="zwxl-article"]|//div[@class="zcwjk-xlcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No known fulltext container -> fail the task so it can be retried.
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99481'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JJXXWCQ"
    zt_provider = "jjxxwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    # Debug/progress output of the generated record id.
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment links (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市科学技术局 (Chongqing Municipal Science and Technology Bureau)
def policy_kjjcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for kjj.cq.gov.cn (Chongqing Science & Technology Bureau).

    On the first list page (page_index == 0) it reads the total page count from
    the inline ``createPage(N)`` call and queues the remaining list pages; it
    then emits one next-stage (article) task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as createPage(N).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this section.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="tab-item"]/li|//ul[@class="zsj-fr-main"]/li|//table[@class="zcwjk-list"]/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[2]/a/@href|div/a/@href|a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://kjj.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99482'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[2]/a/p[@class="tit"]/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|td[2]/a/p[@class="info"]/span[@class="time"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjjcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for kjj.cq.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_kjjcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for kjj.cq.gov.cn article pages.

    Extracts policy metadata (title, document number, index number, subject,
    dates, issuing organ) and the full-text node from the article HTML, then
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables.
    Attachment info found inside the full-text node is written back to the
    source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title embedded in the page meta; fall back to the list title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "市科技局" lack the city prefix; normalize to "重庆市...".
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[@class="zcwjk-xlcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99482'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJJCQ"
    zt_provider = "kjjcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市教育委员会
def policy_jwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jw.cq.gov.cn (Chongqing Municipal Education Commission).

    On the first list page (page_index == 0) it reads the total page count from
    the inline ``createPage(N)`` call and queues the remaining list pages; it
    then emits one next-stage (article) task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as createPage(N).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this section.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="information-l-content"]/ul/li|//div[@class="public-r-content"]/table/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href|td[1]/a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://jw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99483'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()|td[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|td[3]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for jw.cq.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_jwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jw.cq.gov.cn article pages.

    Extracts policy metadata (title, document number, index number, subject,
    dates, issuing organ) and the full-text node from the article HTML, then
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables.
    Attachment info found inside the full-text node is written back to the
    source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title from the list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "市教委" lack the city prefix; normalize to "重庆市...".
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99483'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JWCQ"
    zt_provider = "jwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市民政局
def policy_mzjcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for mzj.cq.gov.cn (Chongqing Civil Affairs Bureau).

    On the first list page (page_index == 0) it reads the total page count from
    the inline ``createPage(N)`` call and queues the remaining list pages; it
    then emits one next-stage (article) task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as createPage(N).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this section.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="tab-item"]/li|//ul[@class="zsj-fr-main"]/li|//ul[@class="info"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://mzj.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99484'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mzjcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for mzj.cq.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_mzjcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzj.cq.gov.cn article pages.

    Extracts policy metadata (title, document number, index number, subject,
    dates, issuing organ) and the full-text node from the article HTML, then
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables.
    Attachment info found inside the full-text node is written back to the
    source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title embedded in the page meta; fall back to the list title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organ names like "市民政局" lack the city prefix; normalize to "重庆市...".
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[@class="Content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99484'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZJCQ"
    zt_provider = "mzjcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市财政局
def policy_czjcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for czj.cq.gov.cn (Chongqing Finance Bureau).

    On the first list page (page_index == 0) it reads the total page count from
    the inline ``createPage(N)`` call and queues the remaining list pages; it
    then emits one next-stage (article) task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as createPage(N).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this section.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://czj.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99485'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_czjcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for czj.cq.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_czjcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for czj.cq.gov.cn article pages.

    Extracts policy metadata (title, document number, index number, subject,
    dates, issuing organ) and the full-text node from the article HTML, then
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables.
    Attachment info found inside the full-text node is written back to the
    source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title from the list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organ names like "市财政局" lack the city prefix; normalize to "重庆市...".
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99485'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZJCQ"
    zt_provider = "czjcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市人力资源和社会保障局
def policy_rlsbjcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for rlsbj.cq.gov.cn (Chongqing Human Resources and Social Security Bureau).

    On the first list page (page_index == 0) it reads the total page count from
    the inline ``createPage(N)`` call and queues the remaining list pages; it
    then emits one next-stage (article) task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page as createPage(N).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: schedule the remaining list pages of this section.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="tab-item"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # Guard against entries without a link (siblings do the same);
            # urljoin would raise on a None href.
            if not href:
                continue
            base_url = f'http://rlsbj.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99486'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rlsbjcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for rlsbj.cq.gov.cn; all parsing is deferred to the ETL step."""
    return DealModel()


def policy_rlsbjcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for rlsbj.cq.gov.cn article pages.

    Extracts policy metadata (title, document number, index number, subject,
    dates, issuing organ) and the full-text node from the article HTML, then
    stages rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables.
    Attachment info found inside the full-text node is written back to the
    source row's ``other_dicts`` column.

    Raises:
        Exception: when the full-text node cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the title from the list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "市人力社保局" lack the city prefix; normalize to "重庆市...".
    if organ.startswith('市'):
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99486'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RLSBJCQ"
    zt_provider = "rlsbjcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市农业农村委员会
def policy_nyncwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Commission of Agriculture and
    Rural Affairs (nyncw.cq.gov.cn, sub_db_id 99487).

    Parses one fetched list page: on page 0 it fans out tasks for all
    remaining list pages (befor_dicts), and for every article link it queues
    a next-stage article task row (next_dicts).

    :param callmodel: carries the fetched html, the source sql row and the
        redis task configuration.
    :return: DealModel holding the pagination and per-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's createPage(N, ...) JS
        # call; raw string fixes the invalid "\(" escape of the original.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                # NOTE(review): sibling cqyz list callback skips href-less
                # <li>; do the same here instead of crashing in urljoin(None).
                continue
            base_url = f'http://nyncw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99487'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nyncw.cq.gov.cn: nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_nyncwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Chongqing Commission of
    Agriculture and Rural Affairs (nyncw.cq.gov.cn, sub_db_id 99487).

    Extracts metadata and the full text from the fetched article html, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes the
    attachment info back onto the source row via other_dicts.

    :param callmodel: carries the fetched html (para_dicts) and the source
        sql row (sql_model).
    :return: EtlDealModel with save_data rows and the other_dicts update.
    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells of the "zwxl-table" info table, located by their labels.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are published without the city prefix; add it.
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context so the task can be retried / investigated.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99487'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCWJCQ"
    zt_provider = "nyncwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress output, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty json object) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市住房和城乡建设委员会
def policy_zfcxjwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Housing and Urban-Rural
    Development Commission (zfcxjw.cq.gov.cn, sub_db_id 99488).

    Parses one fetched list page: on page 0 it fans out tasks for all
    remaining list pages (befor_dicts), and for every article link it queues
    a next-stage article task row (next_dicts).

    :param callmodel: carries the fetched html, the source sql row and the
        redis task configuration.
    :return: DealModel holding the pagination and per-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's createPage(N, ...) JS
        # call; raw string fixes the invalid "\(" escape of the original.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li|//ul[@class="gl-l"]/li|//ul[@class="zcwj-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                # NOTE(review): sibling cqyz list callback skips href-less
                # <li>; do the same here instead of crashing in urljoin(None).
                continue
            base_url = f'http://zfcxjw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99488'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for zfcxjw.cq.gov.cn: nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_zfcxjwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Chongqing Housing and Urban-Rural
    Development Commission (zfcxjw.cq.gov.cn, sub_db_id 99488).

    Extracts metadata and the full text from the fetched article html, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes the
    attachment info back onto the source row via other_dicts.

    :param callmodel: carries the fetched html (para_dicts) and the source
        sql row (sql_model).
    :return: EtlDealModel with save_data rows and the other_dicts update.
    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells of the "zwxl-table" info table, located by their labels.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are published without the city prefix; add it.
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context so the task can be retried / investigated.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99488'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZFCXJWCQ"
    zt_provider = "zfcxjwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress output, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty json object) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市卫生健康委员会
def policy_wsjkwcqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Health Commission
    (wsjkw.cq.gov.cn, sub_db_id 99489).

    Parses one fetched list page: on page 0 it fans out tasks for all
    remaining list pages (befor_dicts), and for every article link it queues
    a next-stage article task row (next_dicts).

    :param callmodel: carries the fetched html, the source sql row and the
        redis task configuration.
    :return: DealModel holding the pagination and per-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's createPage(N, ...) JS
        # call; raw string fixes the invalid "\(" escape of the original.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"gl-list")]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                # NOTE(review): sibling cqyz list callback skips href-less
                # <li>; do the same here instead of crashing in urljoin(None).
                continue
            base_url = f'http://wsjkw.cq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99489'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwcqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.cq.gov.cn: nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_wsjkwcqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Chongqing Health Commission
    (wsjkw.cq.gov.cn, sub_db_id 99489).

    Extracts metadata and the full text from the fetched article html, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes the
    attachment info back onto the source row via other_dicts.

    :param callmodel: carries the fetched html (para_dicts) and the source
        sql row (sql_model).
    :return: EtlDealModel with save_data rows and the other_dicts update.
    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells of the "zwxl-table" info table, located by their labels.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Municipal organs are published without the city prefix; add it.
        organ = '重庆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context so the task can be retried / investigated.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99489'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWCQ"
    zt_provider = "wsjkwcqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress output, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty json object) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市渝中区
def policy_cqyzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yuzhong District government
    (www.cqyz.gov.cn, sub_db_id 99490).

    Parses one fetched list page: on page 0 it fans out tasks for all
    remaining list pages (befor_dicts), and for every article link it queues
    a next-stage article task row (next_dicts).

    :param callmodel: carries the fetched html, the source sql row and the
        redis task configuration.
    :return: DealModel holding the pagination and per-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's createPage(N, ...) JS
        # call; raw string fixes the invalid "\(" escape of the original.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="de-ul-font"]/li|//ul[@class="zsj-fr-main zsj-fr-main-d"]/li|//ul[@class="ul-style-1"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.cqyz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99490'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                # Several list layouts exist; try their date nodes in order.
                "pub_date": li.xpath('span[@class="de-data pull-left"]/text()|div/span[@class="time"]/text()|span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqyzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.cqyz.gov.cn: nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_cqyzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Yuzhong District government
    (www.cqyz.gov.cn, sub_db_id 99490).

    Extracts metadata and the full text from the fetched article html, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes the
    attachment info back onto the source row via other_dicts.

    :param callmodel: carries the fetched html (para_dicts) and the source
        sql row (sql_model).
    :return: EtlDealModel with save_data rows and the other_dicts update.
    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells of the "table-pc" info table, located by their labels.
    pub_no = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="table-pc"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # NOTE(review): the sibling Wanzhou callback prefixes with
        # '重庆市万州' (no trailing '区') so the organ's own leading '区'
        # supplies it; the original '重庆市渝中区' prefix doubled the
        # character ('...渝中区区...'). Confirm against live data.
        organ = '重庆市渝中' + organ

    fulltext_xpath = '//div[@class="article-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context so the task can be retried / investigated.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99490'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQYZ"
    zt_provider = "cqyzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress output, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty json object) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市万州区
def policy_wzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Wanzhou District government
    (www.wz.gov.cn, sub_db_id 99491).

    Parses one fetched list page: on page 0 it fans out tasks for all
    remaining list pages (befor_dicts), and for every article link it queues
    a next-stage article task row (next_dicts).

    :param callmodel: carries the fetched html, the source sql row and the
        redis task configuration.
    :return: DealModel holding the pagination and per-article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the page's createPage(N, ...) JS
        # call; raw string fixes the invalid "\(" escape of the original.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="right-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            if not href:
                # NOTE(review): sibling cqyz list callback skips href-less
                # <li>; do the same here instead of crashing in urljoin(None).
                continue
            base_url = f'http://www.wz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99491'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.wz.gov.cn: nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_wzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for article pages of the Wanzhou District government
    (www.wz.gov.cn, sub_db_id 99491).

    Extracts metadata and the full text from the fetched article html, queues
    rows for the policy_latest / policy_fulltext_latest tables, and writes the
    attachment info back onto the source row via other_dicts.

    :param callmodel: carries the fetched html (para_dicts) and the source
        sql row (sql_model).
    :return: EtlDealModel with save_data rows and the other_dicts update.
    :raises Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Metadata cells of the "zwxl-table" info table, located by their labels.
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # District organs start with '区'; prepend city + district name
        # (the organ's own leading '区' completes '万州区').
        organ = '重庆市万州' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail with context so the task can be retried / investigated.
        raise Exception(f'fulltext not found at {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99491'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WZ"
    zt_provider = "wzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress output, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty json object) back onto the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市涪陵区
def policy_fllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Fuling district (涪陵区) policy site.

    On the first list page (page_index == 0) it fans out tasks for the
    remaining list pages; for every page it queues one article task per
    list entry.

    :param callmodel: framework callback context (fetched HTML + task row).
    :return: DealModel carrying the rows to insert for the before/next stages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12, ...).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="r-list-16 sany_update_watch"]/li|//ul[@class="zsj-fr-main sany_update_watch"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.fl.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99492'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed <li>;
            # .strip() would then raise — assumed not to occur on this site.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_flarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Fuling district: no extra scheduling needed."""
    return DealModel()


def policy_flarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Fuling district (涪陵区).

    Extracts policy metadata and full text from the article HTML, builds rows
    for policy_latest / policy_fulltext_latest, and writes attachment info
    (other_dicts) back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # District-level organ names start with "区…"; qualify with city+district.
        organ = '重庆市涪陵' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty row.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99492'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FL"
    zt_provider = "flgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment metadata is written back to the source row as JSON (other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市大渡口区
def policy_ddklist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Dadukou district (大渡口区) policy site.

    On the first list page (page_index == 0) it fans out tasks for the
    remaining list pages; for every page it queues one article task per
    list entry.

    :param callmodel: framework callback context (fetched HTML + task row).
    :return: DealModel carrying the rows to insert for the before/next stages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12, ...).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="r-list-16"]/li|//ul[@class="gkzd-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.ddk.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99493'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed <li>;
            # .strip() would then raise — assumed not to occur on this site.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ddkarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Dadukou district: no extra scheduling needed."""
    return DealModel()


def policy_ddkarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Dadukou district (大渡口区).

    Extracts policy metadata and full text from the article HTML, builds rows
    for policy_latest / policy_fulltext_latest, and writes attachment info
    (other_dicts) back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # BUGFIX: this is the Dadukou (大渡口) crawler, but the prefix had been
        # copy-pasted from the Fuling callback as '重庆市涪陵'.
        organ = '重庆市大渡口' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty row.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99493'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DDK"
    zt_provider = "ddkgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment metadata is written back to the source row as JSON (other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市江北区
def policy_cqjblist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Jiangbei district (江北区) policy site.

    On the first list page (page_index == 0) it fans out tasks for the
    remaining list pages; for every page it queues one article task per
    list entry.

    :param callmodel: framework callback context (fetched HTML + task row).
    :return: DealModel carrying the rows to insert for the before/next stages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12, ...).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="pList01"]/li|//div[@class="xx-infos"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            base_url = f'http://www.cqjb.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99494'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed <li>;
            # .strip() would then raise — assumed not to occur on this site.
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqjbarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangbei district: no extra scheduling needed."""
    return DealModel()


def policy_cqjbarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Jiangbei district (江北区).

    Extracts policy metadata and full text from the article HTML, builds rows
    for policy_latest / policy_fulltext_latest, and writes attachment info
    (other_dicts) back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Jiangbei renders the metadata table with <span> pairs inside div.zw-table.
    pub_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # District-level organ names start with "区…"; qualify with city+district.
        organ = '重庆市江北' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty row.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99494'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQJB"
    zt_provider = "cqjbgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment metadata is written back to the source row as JSON (other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市沙坪坝区
def policy_cqspblist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Shapingba district (沙坪坝区) policy site.

    On the first list page (page_index == 0) it fans out tasks for the
    remaining list pages; for every page it queues one article task per
    list entry.

    :param callmodel: framework callback context (fetched HTML + task row).
    :return: DealModel carrying the rows to insert for the before/next stages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12, ...).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="pList01"]/li|//ul[@class="xxgkzl-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqspb.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99495'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed <li>;
            # .strip() would then raise — assumed not to occur on this site.
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqspbarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Shapingba district: no extra scheduling needed."""
    return DealModel()


def policy_cqspbarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Shapingba district (沙坪坝区).

    Shapingba serves two page templates: a legacy one with a
    table.content-table metadata block and a newer one with span pairs inside
    div.zw-table. Both are handled; the full text likewise lives in one of two
    containers.

    Raises:
        Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="title"]//text()|//div[@class="news_conent_two_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    if 'content-table' in html:
        # Legacy template: metadata in table.content-table <td> pairs.
        pub_no = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"信息分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"生成日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="content-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    else:
        # Newer template: metadata in div.zw-table <span> pairs.
        pub_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # District-level organ names start with "区…"; qualify with city+district.
        organ = '重庆市沙坪坝' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[@class="news_conent_two_text"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty row.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99495'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQSPB"
    zt_provider = "cqspbgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment metadata is written back to the source row as JSON (other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市九龙坡区
def policy_cqjlplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Jiulongpo district (九龙坡区) policy site.

    On the first list page (page_index == 0) it fans out tasks for the
    remaining list pages; for every page it queues one article task per
    list entry.

    :param callmodel: framework callback context (fetched HTML + task row).
    :return: DealModel carrying the rows to insert for the before/next stages.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12, ...).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page schedules the remaining list-page tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://cqjlp.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99496'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed <li>;
            # .strip() would then raise — assumed not to occur on this site.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqjlparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiulongpo district: no extra scheduling needed."""
    return DealModel()


def policy_cqjlparticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Jiulongpo district (九龙坡区).

    Extracts policy metadata and full text from the article HTML, builds rows
    for policy_latest / policy_fulltext_latest, and writes attachment info
    (other_dicts) back onto the originating task row.

    Raises:
        Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # BUGFIX: organ already starts with '区', so the old prefix
        # '重庆市九龙坡区' produced the doubled '重庆市九龙坡区区…'.
        organ = '重庆市九龙坡' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried/flagged instead of saving an empty row.
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99496'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQJLP"
    zt_provider = "cqjlpgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment metadata is written back to the source row as JSON (other_dicts).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市南岸区
def policy_cqnalist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Nan'an district government site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages for crawling; it then extracts every article link on the current
    page and queues it for the next task stage.

    :param callmodel: framework callback context carrying the fetched HTML
        (``para_dicts``), the originating DB row (``sql_model``) and the
        redis task configuration (``redis_all``).
    :return: DealModel whose ``befor_dicts``/``next_dicts`` hold the rows
        to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12,...);
        # fall back to a single page when it is absent.
        page_counts = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="pList01"]/li|//div[@class="xx-infos"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a[1]/@href').extract_first()
            if not href:
                # Skip list items without a link instead of crashing in urljoin.
                continue
            base_url = f'https://www.cqna.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99497'
            article_json = {
                "url": url,
                "title": li.xpath('a[1]/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqnaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Nan'an district; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_cqnaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Nan'an district article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, dates, issuing organ, validity) from the article HTML, builds
    the rows for ``policy_latest`` / ``policy_fulltext_latest`` and records
    any attachment info back on the source row.

    :param callmodel: callback context with the fetched article HTML and the
        DB row (``sql_model``) that produced it.
    :return: EtlDealModel carrying the data to save plus the update for the
        source row.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list stage.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organ names like "区人民政府" lack the city/district prefix; add it.
    if organ.startswith('区'):
        organ = '重庆市南岸' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99497'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQNA"
    zt_provider = "cqnagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市北碚区
def policy_beibeilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Beibei district government site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages for crawling; it then extracts every article link on the current
    page and queues it for the next task stage.

    :param callmodel: framework callback context carrying the fetched HTML
        (``para_dicts``), the originating DB row (``sql_model``) and the
        redis task configuration (``redis_all``).
    :return: DealModel whose ``befor_dicts``/``next_dicts`` hold the rows
        to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12,...);
        # fall back to a single page when it is absent.
        page_counts = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news-list"]/li|//div[@class="infolist"]/ul/li|//div[@class="xx-infos xx-fy"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip list items without a link instead of crashing in urljoin.
                continue
            base_url = f'https://www.beibei.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99498'
            article_json = {
                "url": url,
                "title": li.xpath('a/span/text()|a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/span/text()|span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_beibeiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Beibei district; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_beibeiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Beibei district article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, dates, issuing organ, validity) from the article HTML, builds
    the rows for ``policy_latest`` / ``policy_fulltext_latest`` and records
    any attachment info back on the source row.

    :param callmodel: callback context with the fetched article HTML and the
        DB row (``sql_model``) that produced it.
    :return: EtlDealModel carrying the data to save plus the update for the
        source row.
    :raises Exception: when no publish date or no full-text container can be
        located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title priority: metadata table, then page headline, then list-stage value.
    title = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"文章标题")]/following::span[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the publish date shown in the metadata table.
        pub_date_info = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布日期")]/following::span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organ names like "区人民政府" lack the city/district prefix; add it.
    if organ.startswith('区'):
        organ = '重庆市北碚' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99498'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BEIBEI"
    zt_provider = "beibeigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市綦江区
def policy_cqqjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Qijiang district government site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages for crawling; it then extracts every article link on the current
    page and queues it for the next task stage.

    :param callmodel: framework callback context carrying the fetched HTML
        (``para_dicts``), the originating DB row (``sql_model``) and the
        redis task configuration (``redis_all``).
    :return: DealModel whose ``befor_dicts``/``next_dicts`` hold the rows
        to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12,...);
        # fall back to a single page when it is absent.
        page_counts = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list"]/li|//ul[@class="tab-item"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip list items without a link instead of crashing in urljoin.
                continue
            base_url = f'http://www.cqqj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99499'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqqjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Qijiang district; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_cqqjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Qijiang district article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, dates, issuing organ, validity) from the article HTML, builds
    the rows for ``policy_latest`` / ``policy_fulltext_latest`` and records
    any attachment info back on the source row.

    :param callmodel: callback context with the fetched article HTML and the
        DB row (``sql_model``) that produced it.
    :return: EtlDealModel carrying the data to save plus the update for the
        source row.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "区人民政府" lack the city/district prefix; add it.
    if organ.startswith('区'):
        organ = '重庆市綦江' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99499'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQQJ"
    zt_provider = "cqqjgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市大足区
def policy_dazulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Dazu district government site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages for crawling; it then extracts every article link on the current
    page and queues it for the next task stage.

    :param callmodel: framework callback context carrying the fetched HTML
        (``para_dicts``), the originating DB row (``sql_model``) and the
        redis task configuration (``redis_all``).
    :return: DealModel whose ``befor_dicts``/``next_dicts`` hold the rows
        to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12,...);
        # fall back to a single page when it is absent.
        page_counts = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="dzgl-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip list items without a link instead of crashing in urljoin.
                continue
            base_url = f'http://www.dazu.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99500'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dazuarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Dazu district; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_dazuarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Dazu district article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, dates, issuing organ, validity) from the article HTML, builds
    the rows for ``policy_latest`` / ``policy_fulltext_latest`` and records
    any attachment info back on the source row.

    :param callmodel: callback context with the fetched article HTML and the
        DB row (``sql_model``) that produced it.
    :return: EtlDealModel carrying the data to save plus the update for the
        source row.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organ names like "区人民政府" lack the city/district prefix; add it.
    if organ.startswith('区'):
        organ = '重庆市大足' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99500'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DAZU"
    zt_provider = "dazugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市渝北区
def policy_ybqlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Chongqing Yubei district government site.

    On the first list page (page_index == 0) it schedules the remaining list
    pages for crawling; it then extracts every article link on the current
    page and queues it for the next task stage.

    :param callmodel: framework callback context carrying the fetched HTML
        (``para_dicts``), the originating DB row (``sql_model``) and the
        redis task configuration (``redis_all``).
    :return: DealModel whose ``befor_dicts``/``next_dicts`` hold the rows
        to insert.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS call such as createPage(12,...);
        # fall back to a single page when it is absent.
        page_counts = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(page_counts[0]) if page_counts else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages (1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a[1]/@href').extract_first()
            if not href:
                # Skip list items without a link instead of crashing in urljoin.
                continue
            base_url = f'http://www.ybq.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99501'
            article_json = {
                "url": url,
                "title": li.xpath('a[1]/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ybqarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yubei district; nothing extra to schedule,
    so an empty DealModel is returned."""
    return DealModel()


def policy_ybqarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Yubei district article pages.

    Extracts the policy metadata (title, document number, index number,
    subject, dates, issuing organ, validity) from the article HTML, builds
    the rows for ``policy_latest`` / ``policy_fulltext_latest`` and records
    any attachment info back on the source row.

    :param callmodel: callback context with the fetched article HTML and the
        DB row (``sql_model``) that produced it.
    :return: EtlDealModel carrying the data to save plus the update for the
        source row.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured at list stage.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"发文字号")]/following::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"索引号")]/following::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"主题分类")]/following::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"生成日期")]/following::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"有效性")]/following::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwgk_info_fkk"]//li[contains(text(),"发布机构")]/following::li[1]/text()').extract()).strip()
    # Organ names like "区人民政府" lack the city/district prefix; add it.
    if organ.startswith('区'):
        organ = '重庆市渝北' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99501'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YBQ"
    zt_provider = "ybqgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (if any) back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市巴南区
def policy_cqbnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Banan district (重庆市巴南区).

    Reads the rendered list page, discovers the total page count from the
    embedded ``createPage(N, ...)`` JS call, schedules the remaining list
    pages (only when handling page 0), and queues one article-level task per
    item found on the current page.

    :param callmodel: platform callback context carrying the fetched HTML
        (``para_dicts``), the originating task row (``sql_model``) and the
        redis task graph (``redis_all``).
    :return: ``DealModel`` with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS createPage(N, ...) call.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages
            # (pages are 0-indexed, so this covers 1 .. total_page-1).
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="right-list"]/li|//ul[@class="tab-item"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # No link on this item; urljoin(None) would raise TypeError.
                continue
            base_url = f'http://www.cqbn.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip javascript/anchor/non-article links.
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99502'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqbnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Banan district; parsing happens in the
    ETL stage, so this intentionally returns an empty deal model."""
    return DealModel()


def policy_cqbnarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Banan district (重庆市巴南区).

    Extracts metadata (title, document number, index number, subject,
    written date, legal status, issuing organ) and the full-text block from
    the article page, stages rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info in
    ``other_dicts`` on the originating task row.

    :param callmodel: callback context with the fetched article HTML and the
        task row (which carries the list-stage ``article_json``).
    :return: ``EtlDealModel`` with ``save_data`` and the update queued in
        ``befor_dicts``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ to "区..."; restore the full name.
    if organ.startswith('区'):
        organ = '重庆市巴南' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99502'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQBN"
    zt_provider = "cqbngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市黔江区
def policy_qianjianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Qianjiang district (重庆市黔江区).

    Reads the rendered list page, discovers the total page count from the
    embedded ``createPage(N, ...)`` JS call, schedules the remaining list
    pages (only when handling page 0), and queues one article-level task per
    item found on the current page.

    :param callmodel: platform callback context carrying the fetched HTML,
        the originating task row and the redis task graph.
    :return: ``DealModel`` with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS createPage(N, ...) call.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="fr-main"]/li|//ul[@class="news-list"]/li|//div[@class="list"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href|div[1]/a/@href').extract_first()
            if not href:
                # No link on this item; urljoin(None) would raise TypeError.
                continue
            base_url = f'https://www.qianjiang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip javascript/anchor/non-article links.
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99503'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()|div[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()|.//span[contains(text(),"发布：")]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qianjiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Qianjiang district; parsing happens in the
    ETL stage, so this intentionally returns an empty deal model."""
    return DealModel()


def policy_qianjiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Qianjiang district (重庆市黔江区).

    Extracts metadata and the full-text block from the article page, stages
    rows for ``policy_latest`` / ``policy_fulltext_latest``, and records
    attachment info in ``other_dicts`` on the originating task row.

    The district uses two page templates: a ``content-type-list`` UL layout
    and the older ``zwxl-table`` table layout; metadata is pulled from
    whichever is present.

    :param callmodel: callback context with the fetched article HTML and the
        task row (which carries the list-stage ``article_json``).
    :return: ``EtlDealModel`` with ``save_data`` and the update queued in
        ``befor_dicts``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the meta-tag title; fall back to the list-page title.
    title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if 'class="content-type-list"' in html:
        pub_no = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//ul[@class="content-type-list"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        # NOTE(review): matches on "索" only (not "索引号"), presumably because
        # some pages break the label across elements — confirm before tightening.
        index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ to "区..."; restore the full name.
    if organ.startswith('区'):
        organ = '重庆市黔江' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[contains(@class,"qjcontent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99503'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QIANJIANG"
    zt_provider = "qianjianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市长寿区
def policy_cqcslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Changshou district (重庆市长寿区).

    Reads the rendered list page, discovers the total page count from the
    embedded ``createPage(N, ...)`` JS call, schedules the remaining list
    pages (only when handling page 0), and queues one article-level task per
    item found on the current page.

    :param callmodel: platform callback context carrying the fetched HTML,
        the originating task row and the redis task graph.
    :return: ``DealModel`` with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS createPage(N, ...) call.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="tab-item"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # No link on this item; urljoin(None) would raise TypeError.
                continue
            base_url = f'http://www.cqcs.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip javascript/anchor/non-article links.
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99504'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqcsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Changshou district; parsing happens in the
    ETL stage, so this intentionally returns an empty deal model."""
    return DealModel()


def policy_cqcsarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Changshou district (重庆市长寿区).

    Extracts metadata (title, document number, index number, subject,
    written date, legal status, issuing organ) and the full-text block from
    the article page, stages rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info in
    ``other_dicts`` on the originating task row.

    :param callmodel: callback context with the fetched article HTML and the
        task row (which carries the list-stage ``article_json``).
    :return: ``EtlDealModel`` with ``save_data`` and the update queued in
        ``befor_dicts``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ to "区..."; restore the full name.
    if organ.startswith('区'):
        organ = '重庆市长寿' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99504'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQCS"
    zt_provider = "cqcsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市江津区
def policy_jiangjinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Jiangjin district (重庆市江津区).

    Reads the rendered list page, discovers the total page count from the
    embedded ``createPage(N, ...)`` JS call, schedules the remaining list
    pages (only when handling page 0), and queues one article-level task per
    item found on the current page. Items appear either as ``<li>`` entries
    or as table rows, hence the alternated XPaths.

    :param callmodel: platform callback context carrying the fetched HTML,
        the originating task row and the redis task graph.
    :return: ``DealModel`` with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS createPage(N, ...) call.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="ul-style-1"]/li|//div[@class="right"]//tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href').extract_first()
            if not href:
                # No link on this item; urljoin(None) would raise TypeError.
                continue
            base_url = f'http://www.jiangjin.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip javascript/anchor/non-article links.
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99505'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/span/text()|td[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jiangjinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Jiangjin district; parsing happens in the
    ETL stage, so this intentionally returns an empty deal model."""
    return DealModel()


def policy_jiangjinarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for Chongqing Jiangjin district (重庆市江津区).

    Extracts metadata (title, document number, index number, subject,
    written date, legal status, issuing organ) and the full-text block from
    the article page, stages rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info in
    ``other_dicts`` on the originating task row.

    :param callmodel: callback context with the fetched article HTML and the
        task row (which carries the list-stage ``article_json``).
    :return: ``EtlDealModel`` with ``save_data`` and the update queued in
        ``befor_dicts``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ to "区..."; restore the full name.
    if organ.startswith('区'):
        organ = '重庆市江津' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99505'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JIANGJIN"
    zt_provider = "jiangjingovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市合川区
def policy_hclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Hechuan district (重庆市合川区).

    Reads the rendered list page, discovers the total page count from the
    embedded ``createPage(N, ...)`` or ``createPageHTML(N, ...)`` JS call,
    schedules the remaining list pages (only when handling page 0), and
    queues one article-level task per item found on the current page.

    :param callmodel: platform callback context carrying the fetched HTML,
        the originating task row and the redis task graph.
    :return: ``DealModel`` with follow-up list pages in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count appears in either of two JS pager calls.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="lbbox_n"]/ul/li|//ul[@class="fr-main"]/li|//ul[@class="leftul"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            if not href:
                # No link on this item; urljoin(None) would raise TypeError.
                continue
            base_url = f'http://www.hc.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip javascript/anchor/non-article links.
                continue

            # rawid is the article file name without its extension; some
            # pages prefix it with "文档", which must be stripped.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            if '文档' in rawid:
                rawid = rawid.replace('文档', '')
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99506'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/span[@class="nr"]/text()|a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a[1]/span[@class="date"]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hcarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Hechuan district; parsing happens in the
    ETL stage, so this intentionally returns an empty deal model."""
    return DealModel()


def policy_hcarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Hechuan District (合川区) policy article pages.

    Extracts the title and metadata from the "texttip" info table, pulls the
    fulltext container, and emits one row each for ``policy_latest`` and
    ``policy_fulltext_latest``; attachment info found in the page is written
    back onto the task row via ``other_dicts``.

    Raises:
        Exception: when no fulltext container matches in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: info table first, then the article heading, then the list-page title.
    title = ''.join(res.xpath('//div[@class="texttip"]//th[contains(text(),"标题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="nr_box"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    index_no = ''.join(res.xpath('//div[@class="texttip"]//th[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="texttip"]//th[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="texttip"]//th[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="texttip"]//th[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Pages abbreviate the issuing organ as "区..."; qualify with city + district.
        organ = '重庆市合川' + organ

    fulltext_xpath = '//div[contains(@class,"view")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99506'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HC"
    zt_provider = "hcgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear inside the fulltext body or in a separate
    # "附件下载" (attachment download) list; merge both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//p[contains(text(),"附件下载：")]/ancestor::div[1])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市永川区 (Chongqing Yongchuan District)
def policy_cqyclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Yongchuan District (永川区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``createPage(N`` script fragment and list tasks for the remaining
    pages are enqueued; every article link on the current page is then parsed
    and enqueued for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="right-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqyc.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue

            # rawid is the page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99507'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqycarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Yongchuan; no extra scheduling is required."""
    return DealModel()


def policy_cqycarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Chongqing Yongchuan District (永川区) policy article pages.

    Extracts the title and metadata from the "zwxl-table" info table, pulls
    the fulltext container, and emits one row each for ``policy_latest`` and
    ``policy_fulltext_latest``; attachment info is written back onto the task
    row via ``other_dicts``.

    Raises:
        Exception: when no fulltext container matches in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: info table first, then the page heading, then the list-page title.
    title = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"标题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Pages abbreviate the issuing organ as "区..."; qualify with city + district.
        organ = '重庆市永川' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99507'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQYC"
    zt_provider = "cqycgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市南川区 (Chongqing Nanchuan District)
def policy_cqnclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Nanchuan District (南川区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``createPage(N`` script fragment and list tasks for the remaining
    pages are enqueued; every article link on the current page is then parsed
    and enqueued for the next (article) stage. "fdzdgknr" sections use a
    table/link layout, everything else a plain list.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'fdzdgknr' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//table[@class="cwx-table"]/tr|//div[@class="xx-1"]/a')
        else:
            li_list = res.xpath('//ul[@class="new-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('td[1]/a/@href|a/@href|@href').extract_first()
            if not href:
                # Header rows / decorative nodes carry no link.
                continue
            base_url = f'http://www.cqnc.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue

            # rawid is the page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99508'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[1]/a/text()|a/text()|p/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[3]/text()|a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqncarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Nanchuan; no extra scheduling is required."""
    return DealModel()


def policy_cqncarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Chongqing Nanchuan District (南川区) policy article pages.

    Extracts the title from the page heading and metadata from the
    "zwxl-head" span pairs, pulls the fulltext container, and emits one row
    each for ``policy_latest`` and ``policy_fulltext_latest``; attachment
    info is written back onto the task row via ``other_dicts``.

    Raises:
        Exception: when no fulltext container matches in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading first, falling back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Pages abbreviate the issuing organ as "区..."; qualify with city + district.
        organ = '重庆市南川' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99508'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQNC"
    zt_provider = "cqncgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市璧山区 (Chongqing Bishan District)
def policy_bishanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Bishan District (璧山区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``createPage(N`` script fragment and list tasks for the remaining
    pages are enqueued; every article link on the current page is then parsed
    and enqueued for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.bishan.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue

            # rawid is the page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99509'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_bishanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Bishan; no extra scheduling is required."""
    return DealModel()


def policy_bishanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Chongqing Bishan District (璧山区) policy article pages.

    Extracts the title from the page heading and metadata from the
    "zwxl-table" info table, pulls the fulltext container, and emits one row
    each for ``policy_latest`` and ``policy_fulltext_latest``; attachment
    info is written back onto the task row via ``other_dicts``.

    Raises:
        Exception: when no fulltext container matches in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: page heading first, falling back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Pages abbreviate the issuing organ as "区..."; qualify with city + district.
        organ = '重庆市璧山' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99509'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BISHAN"
    zt_provider = "bishangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市铜梁区 (Chongqing Tongliang District)
def policy_cqstllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Tongliang District (铜梁区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``createPage(N`` script fragment and list tasks for the remaining
    pages are enqueued; every article link on the current page is then parsed
    and enqueued for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="news_list"]/ul/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.cqstl.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue

            # rawid is the page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99510'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqstlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Tongliang; no extra scheduling is required."""
    return DealModel()


def policy_cqstlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL for Chongqing Tongliang District (铜梁区) policy article pages.

    Extracts the title from the page heading and metadata from the
    "zwxl-main" label/value list items, pulls the fulltext container, and
    emits one row each for ``policy_latest`` and ``policy_fulltext_latest``;
    attachment info is written back onto the task row via ``other_dicts``.

    Raises:
        Exception: when no fulltext container matches in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)
    # Title: page heading first, falling back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publish date: prefer the on-page value, fall back to the list-page date.
    pub_date_info = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"发布日期")]/following::li[1]/text()').extract()).strip()
    pub_date = clean_pubdate(pub_date_info)
    pub_year = pub_date[:4]
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
        pub_year = pub_date[:4]
    pub_no = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"发文字号")]/following::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"索引号")]/following::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"主题分类")]/following::li[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"成文日期")]/following::li[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"有效性")]/following::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-main"]//li[contains(text(),"发布机构")]/following::li[1]/text()').extract()).strip()
    if organ.startswith('区'):
        # Pages abbreviate the issuing organ as "区..."; qualify with city + district.
        organ = '重庆市铜梁' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found by {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99510'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQSTL"
    zt_provider = "cqstlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市潼南区 (Chongqing Tongnan District)
def policy_cqtnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Tongnan District (潼南区).

    On the first page (``page_index == 0``) the total page count is read from
    the ``createPage(N`` script fragment and list tasks for the remaining
    pages are enqueued; every article link on the current page is then parsed
    and enqueued for the next (article) stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: '\(' in a plain literal is an invalid escape (SyntaxWarning on 3.12+).
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # Only the first page fans out the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="tab-item"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqtn.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                # Skip non-article links (attachments, external pages).
                continue

            # rawid is the page file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99511'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqtnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Tongnan district: no follow-up work is scheduled."""
    return DealModel()


def policy_cqtnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page of the Tongnan district (Chongqing) policy site.

    Extracts metadata (title, document number, issuing organ, dates, subject,
    legal status) plus the full-text HTML, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    crawl row via an update.

    :param callmodel: carries the fetched HTML (``para_dicts``) and the
        source DB row (``sql_model``).
    :return: EtlDealModel with ``save_data`` rows and a before-update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organs published as "区..." lack the city/district prefix; qualify them.
    if organ.startswith('区'):
        organ = '重庆市潼南' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; message added so failures are diagnosable.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99511'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQTN"
    zt_provider = "cqtngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市荣昌区
def policy_rongchanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page of the Rongchang district (Chongqing) policy site.

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article-fetch stage.

    :param callmodel: carries the fetched HTML (``para_dicts``), the row
        being processed (``sql_model``) and task routing info.
    :return: DealModel with list-page inserts and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPage(N, ...) JS call; default
        # to 1 when absent. Raw string fixes the invalid '\(' escape sequence.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant, hoisted
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="right-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.rongchang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99512'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rongchangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Rongchang district: no follow-up work is scheduled."""
    return DealModel()


def policy_rongchangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page of the Rongchang district (Chongqing) policy site.

    Extracts metadata (title, document number, issuing organ, dates, subject,
    legal status) plus the full-text HTML, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    crawl row via an update.

    :param callmodel: carries the fetched HTML (``para_dicts``) and the
        source DB row (``sql_model``).
    :return: EtlDealModel with ``save_data`` rows and a before-update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organs published as "区..." lack the city/district prefix; qualify them.
    if organ.startswith('区'):
        organ = '重庆市荣昌' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; message added so failures are diagnosable.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99512'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RONGCHANG"
    zt_provider = "rongchanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市开州区
def policy_cqkzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page of the Kaizhou district (Chongqing) policy site.

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article-fetch stage.

    :param callmodel: carries the fetched HTML (``para_dicts``), the row
        being processed (``sql_model``) and task routing info.
    :return: DealModel with list-page inserts and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPage(N, ...) JS call; default
        # to 1 when absent. Raw string fixes the invalid '\(' escape sequence.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant, hoisted
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li|//ul[@class="fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            base_url = f'http://www.cqkz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99513'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqkzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Kaizhou district: no follow-up work is scheduled."""
    return DealModel()


def policy_cqkzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page of the Kaizhou district (Chongqing) policy site.

    Extracts metadata (title, document number, issuing organ, dates, subject,
    legal status) plus the full-text HTML, builds the ``policy_latest`` and
    ``policy_fulltext_latest`` rows, and writes attachment info back onto the
    crawl row via an update.

    :param callmodel: carries the fetched HTML (``para_dicts``) and the
        source DB row (``sql_model``).
    :return: EtlDealModel with ``save_data`` rows and a before-update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Organs published as "区..." lack the city/district prefix; qualify them.
    if organ.startswith('区'):
        organ = '重庆市开州' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; message added so failures are diagnosable.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99513'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQKZ"
    zt_provider = "cqkzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市梁平区
def policy_cqlplist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page of the Liangping district (Chongqing) policy site.

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article-fetch stage.

    :param callmodel: carries the fetched HTML (``para_dicts``), the row
        being processed (``sql_model``) and task routing info.
    :return: DealModel with list-page inserts and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPage(N, ...) JS call; default
        # to 1 when absent. Raw string fixes the invalid '\(' escape sequence.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant, hoisted
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqlp.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99514'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqlparticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Liangping district: no follow-up work is scheduled."""
    return DealModel()


def policy_cqlparticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page of the Liangping district (Chongqing) policy site.

    Extracts metadata from the ``zwxl-table`` header table plus the full-text
    HTML, builds the ``policy_latest`` and ``policy_fulltext_latest`` rows,
    and writes attachment info back onto the crawl row via an update.

    :param callmodel: carries the fetched HTML (``para_dicts``) and the
        source DB row (``sql_model``).
    :return: EtlDealModel with ``save_data`` rows and a before-update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organs published as "区..." lack the city/district prefix; qualify them.
    if organ.startswith('区'):
        organ = '重庆市梁平' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; message added so failures are diagnosable.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99514'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQLP"
    zt_provider = "cqlpgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市武隆区
def policy_cqwllist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page of the Wulong district (Chongqing) policy site.

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article-fetch stage.

    :param callmodel: carries the fetched HTML (``para_dicts``), the row
        being processed (``sql_model``) and task routing info.
    :return: DealModel with list-page inserts and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPage(N, ...) JS call; default
        # to 1 when absent. Raw string fixes the invalid '\(' escape sequence.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant, hoisted
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://cqwl.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99515'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/span/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/span/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqwlarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Wulong district: no follow-up work is scheduled."""
    return DealModel()


def policy_cqwlarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one article page of the Wulong district (Chongqing) policy site.

    Extracts metadata from the ``zwxl-table`` header table plus the full-text
    HTML, builds the ``policy_latest`` and ``policy_fulltext_latest`` rows,
    and writes attachment info back onto the crawl row via an update.

    :param callmodel: carries the fetched HTML (``para_dicts``) and the
        source DB row (``sql_model``).
    :return: EtlDealModel with ``save_data`` rows and a before-update.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the one captured on the list page.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Organs published as "区..." lack the city/district prefix; qualify them.
    if organ.startswith('区'):
        organ = '重庆市武隆' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare ``raise Exception``; message added so failures are diagnosable.
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99515'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CQWL"
    zt_provider = "cqwlgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty dict) back on the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市城口县
def policy_cqcklist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse one list page of the Chengkou county (Chongqing) policy site.

    On the first page it schedules the remaining list pages; on every page it
    extracts article links and queues them for the article-fetch stage.

    :param callmodel: carries the fetched HTML (``para_dicts``), the row
        being processed (``sql_model``) and task routing info.
    :return: DealModel with list-page inserts and article inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPage(N, ...) JS call; default
        # to 1 when absent. Raw string fixes the invalid '\(' escape sequence.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page: enqueue the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            sql_dict["page"] = total_page  # loop-invariant, hoisted
            for page in range(page_index + 1, total_page):
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="zsj-fr-main"]/li|//ul[@class="fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            base_url = f'http://www.cqck.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99516'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = ''.join(li.xpath('text()|span/text()').extract()).strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqckarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for Chengkou county: no follow-up work is scheduled."""
    return DealModel()


def policy_cqckarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Chengkou county (cqck.gov.cn) article pages.

    Parses the rendered article HTML into a policy metadata record plus a
    full-text record, and writes attachment info back onto the source row.

    :param callmodel: callback model carrying the fetched page and the row data.
    :return: EtlDealModel with `save_data` rows and a before-update for the crawl row.
    :raises Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # NOTE(review): this selector targets the span following "发文字号" (document
    # number), which looks like a copy-paste of the pub_no xpath — confirm whether
    # the page exposes a dedicated title node. Falls back to the list-time title.
    title = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # The site uses two metadata layouts: a <table class="zwxl-table"> grid or a
    # <div class="zwxl-head"> span list. Pick the matching extractor once.
    if 'class="zwxl-table"' in html:
        def _meta(label):
            # Value cell: first <td> following the <td> whose text holds `label`.
            xp = f'//table[@class="zwxl-table"]//td[contains(text(),"{label}")]/following::td[1]/text()'
            return ''.join(res.xpath(xp).extract()).strip()
        legal_status = ''  # table layout carries no validity ("有效性") cell
    else:
        def _meta(label):
            # Value span: first <span> following the <span> whose text holds `label`.
            xp = f'//div[@class="zwxl-head"]//span[contains(text(),"{label}")]/following::span[1]/text()'
            return ''.join(res.xpath(xp).extract()).strip()
        legal_status = _meta('有效性')

    pub_no = _meta('发文字号')
    index_no = _meta('索引号')
    subject = _meta('主题分类')
    written_date = _meta('成文日期')
    organ = _meta('发布机构')
    # Pages abbreviate the issuing organ as "县..." — qualify with the county name.
    if organ.startswith('县'):
        organ = '重庆市城口' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99516'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQCK", "cqckgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市丰都县 (Chongqing Fengdu County)
def policy_cqfdlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Fengdu county (cqfd.gov.cn).

    On the first page, derives the total page count from the embedded
    ``createPage(N)`` pager call and enqueues the remaining list-page tasks;
    every page then yields one next-stage (article) task per list item.

    :param callmodel: callback model with the fetched list page and row data.
    :return: DealModel carrying the before-insert and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\(" escape warning on modern Python.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list-page tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="gkm-crbox"]/ul/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqfd.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../abc_123.html" -> "abc_123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99517'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed items,
            # raising AttributeError on .strip() — confirm that is acceptable.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqfdarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Fengdu county: no extra scheduling work is needed,
    so an empty DealModel is returned."""
    return DealModel()


def policy_cqfdarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Fengdu county (cqfd.gov.cn) article pages.

    Parses the rendered article HTML into a policy metadata record plus a
    full-text record, and writes attachment info back onto the source row.

    :param callmodel: callback model carrying the fetched page and the row data.
    :return: EtlDealModel with `save_data` rows and a before-update for the crawl row.
    :raises Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Value cell: first <td> following the <td> whose text holds `label`.
        xp = f'//table[@class="zwxl-table"]//td[contains(text(),"{label}")]/following::td[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('发文字号')
    index_no = _meta('索引号')
    subject = _meta('主题分类')
    written_date = _meta('成文日期')
    legal_status = _meta('有效性')
    organ = _meta('发布机构')
    # Pages abbreviate the issuing organ as "县..." — qualify with the county name.
    if organ.startswith('县'):
        organ = '重庆市丰都' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99517'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQFD", "cqfdgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市垫江县 (Chongqing Dianjiang County)
def policy_cqsdjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Dianjiang county (cqsdj.gov.cn).

    On the first page, derives the total page count from the embedded
    ``createPage(N)`` pager call and enqueues the remaining list-page tasks;
    every page then yields one next-stage (article) task per list item.

    :param callmodel: callback model with the fetched list page and row data.
    :return: DealModel carrying the before-insert and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\(" escape warning on modern Python.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list-page tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list sany_update_watch"]/li|//ul[@class="zsj-fr-main sany_update_watch"]/li|//ul[@class="right-list sany_update_watch"]/li|//ul[@class="right-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqsdj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../abc_123.html" -> "abc_123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99518'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed items,
            # raising AttributeError on .strip() — confirm that is acceptable.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqsdjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dianjiang county: no extra scheduling work is needed,
    so an empty DealModel is returned."""
    return DealModel()


def policy_cqsdjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Dianjiang county (cqsdj.gov.cn) article pages.

    Parses the rendered article HTML into a policy metadata record plus a
    full-text record, and writes attachment info back onto the source row.

    :param callmodel: callback model carrying the fetched page and the row data.
    :return: EtlDealModel with `save_data` rows and a before-update for the crawl row.
    :raises Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Value cell: first <td> following the <td> whose text holds `label`.
        xp = f'//table[@class="zwxl-table"]//td[contains(text(),"{label}")]/following::td[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('发文字号')
    index_no = _meta('索引号')
    subject = _meta('主题分类')
    written_date = _meta('成文日期')
    legal_status = _meta('有效性')
    organ = _meta('发布机构')
    # Pages abbreviate the issuing organ as "县..." — qualify with the county name.
    if organ.startswith('县'):
        organ = '重庆市垫江' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[@class="zcwjk-xlcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99518'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQSDJ", "cqsdjgovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市忠县 (Chongqing Zhong County)
def policy_zhongxianlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Zhong county (zhongxian.gov.cn).

    On the first page, derives the total page count from the embedded
    ``createPage(N)`` pager call and enqueues the remaining list-page tasks;
    every page then yields one next-stage (article) task per list item.

    :param callmodel: callback model with the fetched list page and row data.
    :return: DealModel carrying the before-insert and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\(" escape warning on modern Python.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list-page tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.zhongxian.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../abc_123.html" -> "abc_123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99519'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed items,
            # raising AttributeError on .strip() — confirm that is acceptable.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zhongxianarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Zhong county: no extra scheduling work is needed,
    so an empty DealModel is returned."""
    return DealModel()


def policy_zhongxianarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Zhong county (zhongxian.gov.cn) article pages.

    Parses the rendered article HTML into a policy metadata record plus a
    full-text record, and writes attachment info back onto the source row.

    :param callmodel: callback model carrying the fetched page and the row data.
    :return: EtlDealModel with `save_data` rows and a before-update for the crawl row.
    :raises Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Value span: first <span> following the <span> whose text holds `label`.
        xp = f'//div[@class="zwxl-head"]//span[contains(text(),"{label}")]/following::span[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('发文字号')
    index_no = _meta('索引号')
    subject = _meta('主题分类')
    written_date = _meta('成文日期')
    legal_status = _meta('有效性')
    organ = _meta('发布机构')
    # Pages abbreviate the issuing organ as "县..." — qualify with the county name.
    if organ.startswith('县'):
        organ = '重庆市忠' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99519'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "ZHONGXIAN", "zhongxiangovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市云阳县 (Chongqing Yunyang County)
def policy_yunyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Yunyang county (yunyang.gov.cn).

    On the first page, derives the total page count from the embedded
    ``createPage(N)`` pager call and enqueues the remaining list-page tasks;
    every page then yields one next-stage (article) task per list item.

    :param callmodel: callback model with the fetched list page and row data.
    :return: DealModel carrying the before-insert and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\(" escape warning on modern Python.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list-page tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="pList01"]/li|//ul[@class="zsj-fr-main"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.yunyang.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../abc_123.html" -> "abc_123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99520'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed items,
            # raising AttributeError on .strip() — confirm that is acceptable.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yunyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Yunyang county: no extra scheduling work is needed,
    so an empty DealModel is returned."""
    return DealModel()


def policy_yunyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chongqing Yunyang county (yunyang.gov.cn) article pages.

    Parses the rendered article HTML into a policy metadata record plus a
    full-text record, and writes attachment info back onto the source row.

    :param callmodel: callback model carrying the fetched page and the row data.
    :return: EtlDealModel with `save_data` rows and a before-update for the crawl row.
    :raises Exception: when the full-text container cannot be located in the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    def _meta(label):
        # Value span: first <span> following the <span> whose text holds `label`.
        xp = f'//div[@class="zwxl-head"]//span[contains(text(),"{label}")]/following::span[1]/text()'
        return ''.join(res.xpath(xp).extract()).strip()

    pub_no = _meta('发文字号')
    index_no = _meta('索引号')
    subject = _meta('主题分类')
    written_date = _meta('成文日期')
    legal_status = _meta('有效性')
    organ = _meta('发布机构')
    # Pages abbreviate the issuing organ as "县..." — qualify with the county name.
    if organ.startswith('县'):
        organ = '重庆市云阳' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99520'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "YUNYANG", "yunyanggovpolicy")

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) back onto the crawl row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市奉节县 (Chongqing Fengjie County)
def policy_cqfjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chongqing Fengjie county (cqfj.gov.cn).

    On the first page, derives the total page count from the embedded
    ``createPage(N)`` pager call and enqueues the remaining list-page tasks;
    every page then yields one next-stage (article) task per list item.

    :param callmodel: callback model with the fetched list page and row data.
    :return: DealModel carrying the before-insert and next-stage inserts.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid "\(" escape warning on modern Python.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list-page tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="news-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # Next-stage rows run under the follow-up task tag.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.cqfj.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../abc_123.html" -> "abc_123".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99521'
            article_json["url"] = url
            # NOTE(review): extract_first() may return None on malformed items,
            # raising AttributeError on .strip() — confirm that is acceptable.
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqfjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Fengjie County site; no post-processing needed."""
    return DealModel()


def policy_cqfjarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for the Fengjie County (Chongqing) policy site.

    Extracts metadata and full text from the downloaded article HTML,
    builds `policy_latest` / `policy_fulltext_latest` rows, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the page heading, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"发文字号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"主题分类")]/following::div[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"成文日期")]/following::div[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"有效性")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="index-table"]//div[contains(text(),"发布机构")]/following::div[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ as "县..."; prefix the region.
    if organ.startswith('县'):
        organ = '重庆市奉节' + organ

    fulltext_xpath = '//div[contains(@class,"detail-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task loudly so the scheduler can flag/retry it.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99521'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQFJ", "cqfjgovpolicy")
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市巫山县 (Wushan County, Chongqing)
def policy_cqwslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Wushan County (Chongqing) policy site.

    On the first page, fans out one task row per remaining list page; for
    every page, emits one article-level task row per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPage(N) JS call; default to
        # one page when absent.  Raw string avoids the invalid "\(" escape.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li')
        # Loop-invariant: relative hrefs resolve against the list-page URL.
        base_url = f'http://cqws.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99522'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqwsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Wushan County site; no post-processing needed."""
    return DealModel()


def policy_cqwsarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for the Wushan County (Chongqing) policy site.

    Extracts metadata and full text from the downloaded article HTML,
    builds `policy_latest` / `policy_fulltext_latest` rows, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: metadata table first, then page heading, then list-page title.
    title = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"标题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ as "县..."; prefix the region.
    if organ.startswith('县'):
        organ = '重庆市巫山' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task loudly so the scheduler can flag/retry it.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99522'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQWS", "cqwsgovpolicy")
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市巫溪县 (Wuxi County, Chongqing)
def policy_cqwxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Wuxi County (Chongqing) policy site.

    On the first page, fans out one task row per remaining list page; for
    every page, emits one article-level task row per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPage(N) JS call; default to
        # one page when absent.  Raw string avoids the invalid "\(" escape.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="infolists"]/ul/li|//div[@class="xx-infos"]/ul/li')
        # Loop-invariant: relative hrefs resolve against the list-page URL.
        base_url = f'http://www.cqwx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99523'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqwxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Wuxi County site; no post-processing needed."""
    return DealModel()


def policy_cqwxarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for the Wuxi County (Chongqing) policy site.

    Extracts metadata and full text from the downloaded article HTML,
    builds `policy_latest` / `policy_fulltext_latest` rows, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the page heading, fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"zw-table")]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ as "县..."; prefix the region.
    if organ.startswith('县'):
        organ = '重庆市巫溪' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task loudly so the scheduler can flag/retry it.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99523'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQWX", "cqwxgovpolicy")
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市石柱土家族自治县 (Shizhu Tujia Autonomous County, Chongqing)
def policy_cqszxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Shizhu County (Chongqing) policy site.

    On the first page, fans out one task row per remaining list page; for
    every page, emits one article-level task row per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPage(N) JS call; default to
        # one page when absent.  Raw string avoids the invalid "\(" escape.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="center-list"]/li|//ul[@class="fr-main"]/li|//ul[@class="right-list"]/li')
        # Loop-invariant: relative hrefs resolve against the list-page URL.
        base_url = f'http://cqszx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99524'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqszxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Shizhu County site; no post-processing needed."""
    return DealModel()


def policy_cqszxarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for the Shizhu County (Chongqing) policy site.

    Extracts metadata and full text from the downloaded article HTML,
    builds `policy_latest` / `policy_fulltext_latest` rows, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the page heading, fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ as "县..."; prefix the region.
    if organ.startswith('县'):
        organ = '重庆市石柱土家族自治' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task loudly so the scheduler can flag/retry it.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99524'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQSZX", "cqszxgovpolicy")
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市秀山土家族苗族自治县 (Xiushan Tujia and Miao Autonomous County, Chongqing)
def policy_cqxslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Xiushan County (Chongqing) policy site.

    On the first page, fans out one task row per remaining list page; for
    every page, emits one article-level task row per list item.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPage(N) JS call; default to
        # one page when absent.  Raw string avoids the invalid "\(" escape.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="gl-list"]/li|//ul[@class="zsj-fr-main"]/li')
        # Loop-invariant: relative hrefs resolve against the list-page URL.
        base_url = f'http://www.cqxs.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99525'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cqxsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article download callback for the Xiushan County site; no post-processing needed."""
    return DealModel()


def policy_cqxsarticle_etl_callback(callmodel) -> EtlDealModel:
    """Article ETL callback for the Xiushan County (Chongqing) policy site.

    Extracts metadata and full text from the downloaded article HTML,
    builds `policy_latest` / `policy_fulltext_latest` rows, and writes any
    attachment info back onto the task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the page heading, fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ as "县..."; prefix the region.
    if organ.startswith('县'):
        organ = '重庆市秀山土家族苗族自治' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail the task loudly so the scheduler can flag/retry it.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99525'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data = init_data(rawid, lngid, sub_db_id, down_date_str, "CQXS", "cqxsgovpolicy")
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty dict) back onto the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市酉阳土家族苗族自治县 (Youyang Tujia and Miao Autonomous County, Chongqing)
def policy_youyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Youyang County (Chongqing) policy site.

    On the first page, fans out one task row per remaining list page; for
    every page, emits one article-level task row per list item.  Some
    channels (gsgg, qtwj) use different title/date markup and get
    channel-specific XPaths.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the site's createPage(N) JS call; default to
        # one page when absent.  Raw string avoids the invalid "\(" escape.
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="zsj-fr-main"]/li|//ul[@class="fr-main"]/li|//ul[@class="text-list"]/li|//ul[@class="list"]/li')
        # Loop-invariants: channel id and base URL for resolving hrefs.
        list_rawid = callmodel.sql_model.list_rawid
        base_url = f'http://youyang.gov.cn/{list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('p/a[1]/@href|a[1]/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the article file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99526'
            article_json = {"url": url}
            # The gsgg channel wraps titles in a span inside the link.
            if 'sy_236/gsgg' in list_rawid:
                article_json["title"] = li.xpath('a[1]/span[1]/text()').extract_first().strip()
            else:
                article_json["title"] = li.xpath('p/a[1]/text()|a[1]/text()').extract_first().strip()
            # The qtwj channel carries two date spans; use the second one.
            if 'zwgk_236/zfxxgkml01/zcwj/qtwj' in list_rawid:
                article_json["pub_date"] = ''.join(li.xpath('span[@class="date"][2]/text()').extract()).strip()
            else:
                article_json["pub_date"] = li.xpath('a/span[2]/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_youyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Youyang: no extra scheduling is needed,
    so hand back an empty deal model."""
    return DealModel()


def policy_youyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Youyang county government policy article pages.

    Parses the downloaded detail-page HTML into one ``policy_latest`` row and
    one ``policy_fulltext_latest`` row, and writes attachment info (if any)
    back onto the source row's ``other_dicts`` column.

    :param callmodel: callback model carrying the fetched page under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # The site serves two metadata layouts; choose the matching selectors.
    if 'class="table-box"' in html:
        pub_no = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//div[@class="table-box"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
        index_no = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
        subject = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
        legal_status = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"有效性")]/following::td[1]/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="zwxl-table"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Pages abbreviate the issuing organ as "县..."; restore the full name.
    if organ.startswith('县'):
        organ = '重庆市酉阳土家族苗族自治' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]|//div[@class="content-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No recognisable article body: fail loudly so the task can be retried.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99526'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YOUYANG"
    zt_provider = "youyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   重庆市彭水苗族土家族自治县 (Pengshui Miao and Tujia Autonomous County, Chongqing)
def policy_psxlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Pengshui county government policy pages.

    On the first page (page_index == 0) the total page count is read from the
    embedded ``createPage(N)`` JS call and the remaining list pages are queued;
    on every page the article links are extracted and queued for the article
    stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids an invalid-escape SyntaxWarning for "\(".
        max_count = re.findall(r'createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: enqueue list tasks for pages 1..total_page-1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="new-list"]/li|//ul[@class="gkzd-list"]/li|//ul[@class="fr-main"]/li')
        # Loop-invariant: resolve relative hrefs against the list page URL.
        base_url = f'http://www.psx.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a[1]/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99527'
            article_json["url"] = url
            article_json["title"] = li.xpath('a[1]/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_psxarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Pengshui: nothing to schedule, return an
    empty deal model."""
    return DealModel()


def policy_psxarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Pengshui county government policy article pages.

    Parses the downloaded detail-page HTML into one ``policy_latest`` row and
    one ``policy_fulltext_latest`` row, and writes attachment info (if any)
    back onto the source row's ``other_dicts`` column.

    :param callmodel: callback model carrying the fetched page under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//p[@class="tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发文字号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"索引号")]/following::span[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"主题分类")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"成文日期")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"有效性")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="zwxl-head"]//span[contains(text(),"发布机构")]/following::span[1]/text()').extract()).strip()
    # Pages abbreviate the issuing organ as "县..."; restore the full name.
    if organ.startswith('县'):
        organ = '重庆市彭水苗族土家族自治' + organ

    fulltext_xpath = '//div[contains(@class,"zwxl-article")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No recognisable article body: fail loudly so the task can be retried.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99527'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PSX"
    zt_provider = "psxgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省发展和改革委员会 (Sichuan Provincial Development and Reform Commission)
def policy_fgwsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Development and Reform Commission.

    On the first page (page_index == 1) the total page count is read from the
    embedded ``createPageHTML('page_div', N)`` JS call and the remaining list
    pages are queued. Article links are then extracted; the site has three
    distinct list layouts, selected by ``list_rawid``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids an invalid-escape SyntaxWarning for "\(".
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Loop-invariant host used to absolutize relative hrefs.
        base_url = 'http://fgw.sc.gov.cn'
        if 'sfgw/c109344' == callmodel.sql_model.list_rawid:
            # Layout 1: table-like rows with the pub number in the link cell.
            li_list = res.xpath('//div[@class="biaobody"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div[2]/a/@href').extract_first()
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue

                # rawid is the file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99528'
                article_json["url"] = url
                article_json["title"] = li.xpath('div[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                article_json["pub_no"] = ''.join(li.xpath('div[2]//text()').extract()).strip()
                article_json["written_date"] = li.xpath('div[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'sfgw/c109345' == callmodel.sql_model.list_rawid:
            # Layout 2: definition-list entries with date in a span.
            li_list = res.xpath('//div[@class="gknr_list"]/dl/dd')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue

                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99528'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                article_json["pub_no"] = ''
                article_json["written_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 3 (default): generic "list-li" entries.
            li_list = res.xpath('//ul[contains(@class,"list-li")]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div/h1/div/a/@href').extract_first()
                url = href if 'http' in href else base_url + href
                if 'htm' not in url:
                    continue

                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99528'
                article_json["url"] = url
                article_json["title"] = li.xpath('div/h1/div/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div/h1/span/text()').extract_first().strip()
                article_json["pub_no"] = ''
                article_json["written_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_fgwscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Sichuan DRC site: nothing to schedule,
    return an empty deal model."""
    return DealModel()


def policy_fgwscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sichuan DRC policy article pages.

    Parses the downloaded detail-page HTML into one ``policy_latest`` row and
    one ``policy_fulltext_latest`` row, and writes attachment info (if any)
    back onto the source row's ``other_dicts`` column. The pub number and
    written date were captured at list time and travel in ``article_json``.

    :param callmodel: callback model carrying the fetched page under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_no = article_json['pub_no']
    written_date = article_json['written_date']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@id="zoomtitl"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Some list layouts carry no date; recover it from the <publishtime> tag.
    if not pub_date:
        pub_date = ''.join(res.xpath('//publishtime/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date)
        pub_year = pub_date[:4]

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No recognisable article body: fail loudly so the task can be retried.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99528'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "FGWSC"
    zt_provider = "fgwscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省经济和信息化厅 (Sichuan Provincial Department of Economy and Information Technology)
def policy_jxtsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Economy and
    Information Technology.

    On the first page (page_index == 1) the total page count is read from the
    embedded ``createPageHTML('page_div', N)`` JS call and the remaining list
    pages are queued; article links are extracted and queued on every page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids an invalid-escape SyntaxWarning for "\(".
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"list-li")]/li')
        # Loop-invariant host used to absolutize relative hrefs.
        base_url = 'https://jxt.sc.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/h1/div/a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99529'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/h1/div/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('div/h1/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jxtscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Sichuan DEIT site: nothing to schedule,
    return an empty deal model."""
    return DealModel()


def policy_jxtscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sichuan DEIT policy article pages.

    Parses the downloaded detail-page HTML into one ``policy_latest`` row and
    one ``policy_fulltext_latest`` row, and writes attachment info (if any)
    back onto the source row's ``other_dicts`` column.

    :param callmodel: callback model carrying the fetched page under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@id="zoomtitl"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No recognisable article body: fail loudly so the task can be retried.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99529'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JXTSC"
    zt_provider = "jxtscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省科学技术厅 (Sichuan Provincial Department of Science and Technology)
def policy_kjtsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Science and Technology.

    On the first page (page_index == 1) the total page count is read from the
    embedded ``createPageHTML('page_div', N)`` JS call and the remaining list
    pages are queued; article links are extracted and queued on every page.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids an invalid-escape SyntaxWarning for "\(".
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: enqueue list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="content"]//ul[@class="list-unstyled"]/li|//div[@class="column-list"]/ul/li')
        # Loop-invariant: resolve relative hrefs against the list page URL.
        base_url = f'https://kjt.sc.gov.cn/{callmodel.sql_model.list_rawid}.html'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99530'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for the Sichuan DST site: nothing to schedule,
    return an empty deal model."""
    return DealModel()


def policy_kjtscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Sichuan DST policy article pages.

    Parses the downloaded detail-page HTML into one ``policy_latest`` row and
    one ``policy_fulltext_latest`` row, and writes attachment info (if any)
    back onto the source row's ``other_dicts`` column. Metadata lives in the
    page's "abstract" list as "label：value" items.

    :param callmodel: callback model carrying the fetched page under
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="newsTex"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Each metadata item reads "label：value"; keep only the value part.
    pub_no = ''.join(res.xpath('//div[@class="abstract"]//li[contains(text(),"文  号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//div[@class="abstract"]//li[contains(text(),"索引号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject_word = ''.join(res.xpath('//div[@class="abstract"]//li[contains(text(),"关键词：")]/text()').extract()).strip()
    subject_word = subject_word.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="abstract"]//li[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()

    fulltext_xpath = '//div[@class="newsCon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # No recognisable article body: fail loudly so the task can be retried.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99530'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTSC"
    zt_provider = "kjtscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace, kept for parity with sibling callbacks

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (if any) back onto the article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省教育厅 (Sichuan Provincial Department of Education)
def policy_edusclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Education (edu.sc.gov.cn).

    On the first page, reads the total page count from the JS pager call and
    enqueues list tasks for the remaining pages; then extracts each article's
    url/title/pub_date from the list HTML and queues it for the article stage.

    :param callmodel: platform callback context (parsed HTML in ``para_dicts``,
        task row in ``sql_model``).
    :return: DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS pager call on the page.
        # Raw string: \( and \d are invalid escapes in a plain string literal.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="xwzxList"]/li|//div[@class="gknr_list"]/ul/li')
        base_url = 'http://edu.sc.gov.cn'  # loop-invariant: hoisted
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Decorative/malformed <li> without a link: skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the filename without its extension; skip if no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99531'
            article_json = {
                "url": url,
                # extract_first() may return None; default to '' before strip.
                "title": (li.xpath('a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_eduscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for edu.sc.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_eduscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for edu.sc.gov.cn policy articles (sub_db_id 99531).

    Parses the fetched article HTML, builds the ``policy_latest`` metadata
    record and the ``policy_fulltext_latest`` record, and schedules an update
    of the source row's attachment info (``other_dicts``).

    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page; repair tab-garbled titles via the meta
    # tag; finally fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="gknbxq_top"]/h2//text()|//div[@class="detail"]/h1//text()').extract()).strip()
    if '\t' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="cont"]|//div[@class="gknbxq_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL with a diagnostic so the failure is traceable.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99531'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "EDUSC"
    zt_provider = "eduscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when none) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省民政厅 (Sichuan Provincial Department of Civil Affairs)
def policy_mztsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Civil Affairs (mzt.sc.gov.cn).

    On the first page, reads the total page count from the JS pager call and
    enqueues list tasks for the remaining pages; then extracts each article's
    url/title/pub_date from the list HTML and queues it for the article stage.

    :param callmodel: platform callback context (parsed HTML in ``para_dicts``,
        task row in ``sql_model``).
    :return: DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS pager call on the page.
        # Raw string: \( and \d are invalid escapes in a plain string literal.
        max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="info-list"]/li')
        base_url = 'http://mzt.sc.gov.cn'  # loop-invariant: hoisted
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Decorative/malformed <li> without a link: skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the filename without its extension; skip if no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99532'
            article_json = {
                "url": url,
                # extract_first() may return None; default to '' before strip.
                "title": (li.xpath('a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('a/span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_mztscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for mzt.sc.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_mztscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for mzt.sc.gov.cn policy articles (sub_db_id 99532).

    Parses the fetched article HTML, builds the ``policy_latest`` metadata
    record and the ``policy_fulltext_latest`` record, and schedules an update
    of the source row's attachment info (``other_dicts``).

    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page; fall back to the list-page title.
    title = ''.join(res.xpath('//h1[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="article-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL with a diagnostic so the failure is traceable.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99532'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MZTSC"
    zt_provider = "mztscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when none) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省财政厅 (Sichuan Provincial Department of Finance)
def policy_cztsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Finance (czt.sc.gov.cn).

    On the first page, reads the total page count from the JS pager call and
    enqueues list tasks for the remaining pages; then extracts each article's
    url/title/pub_date from the list HTML and queues it for the article stage.

    :param callmodel: platform callback context (parsed HTML in ``para_dicts``,
        task row in ``sql_model``).
    :return: DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS pager call on the page.
        # Raw string: \( and \d are invalid escapes in a plain string literal.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li|//div[@class="gknr_list"]/dl/dd')
        base_url = 'http://czt.sc.gov.cn'  # loop-invariant: hoisted
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Decorative/malformed item without a link: skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the filename without its extension; skip if no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99533'
            article_json = {
                "url": url,
                # extract_first() may return None; default to '' before strip.
                "title": (li.xpath('a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for czt.sc.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_cztscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for czt.sc.gov.cn policy articles (sub_db_id 99533).

    Parses the fetched article HTML, builds the ``policy_latest`` metadata
    record and the ``policy_fulltext_latest`` record, and schedules an update
    of the source row's attachment info (``other_dicts``).

    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="detail"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL with a diagnostic so the failure is traceable.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99533'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTSC"
    zt_provider = "cztscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when none) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省人力资源和社会保障厅 (Sichuan Provincial Department of Human Resources and Social Security)
def policy_rstsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Human Resources and
    Social Security (rst.sc.gov.cn).

    On the first page, reads the total page count from the JS pager call and
    enqueues list tasks for the remaining pages; then extracts each article's
    url/title/pub_date from the list HTML and queues it for the article stage.

    :param callmodel: platform callback context (parsed HTML in ``para_dicts``,
        task row in ``sql_model``).
    :return: DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS pager call on the page.
        # Raw string: \( and \d are invalid escapes in a plain string literal.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="box"]/ul/li')
        base_url = 'http://rst.sc.gov.cn'  # loop-invariant: hoisted
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Decorative/malformed <li> without a link: skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the filename without its extension; skip if no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99534'
            article_json = {
                "url": url,
                # extract_first() may return None; default to '' before strip.
                "title": (li.xpath('a/p/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('a/span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_rstscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for rst.sc.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_rstscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for rst.sc.gov.cn policy articles (sub_db_id 99534).

    Parses the fetched article HTML, builds the ``policy_latest`` metadata
    record and the ``policy_fulltext_latest`` record, and schedules an update
    of the source row's attachment info (``other_dicts``).

    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[contains(@class,"biaoTi2")]/p//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="print"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL with a diagnostic so the failure is traceable.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99534'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "RSTSC"
    zt_provider = "rstscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when none) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省农业农村厅 (Sichuan Provincial Department of Agriculture and Rural Affairs)
def policy_nynctsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Sichuan Department of Agriculture and Rural
    Affairs (nynct.sc.gov.cn).

    On the first page, reads the total page count from the JS pager call and
    enqueues list tasks for the remaining pages; then extracts each article's
    url/title/pub_date from the list HTML and queues it for the article stage.

    :param callmodel: platform callback context (parsed HTML in ``para_dicts``,
        task row in ``sql_model``).
    :return: DealModel with page tasks in ``befor_dicts`` and article tasks in
        ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a JS pager call on the page.
        # Raw string: \( and \d are invalid escapes in a plain string literal.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="artcleList"]/li|//ul[@class="fr-main"]/li')
        base_url = 'http://nynct.sc.gov.cn'  # loop-invariant: hoisted
        for li in li_list:
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Decorative/malformed <li> without a link: skip instead of crashing.
                continue
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the filename without its extension; skip if no dot.
            rawid_match = re.findall(r'(.*?)\.', url.split('/')[-1])
            if not rawid_match:
                continue
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            temp["rawid"] = rawid_match[0]
            temp["sub_db_id"] = '99535'
            article_json = {
                "url": url,
                # extract_first() may return None; default to '' before strip.
                "title": (li.xpath('a/p/text()|a/text()').extract_first() or '').strip(),
                "pub_date": (li.xpath('span/text()').extract_first() or '').strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nynctscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for nynct.sc.gov.cn; parsing happens in the ETL step."""
    return DealModel()


def policy_nynctscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for nynct.sc.gov.cn policy articles (sub_db_id 99535).

    Parses the fetched article HTML, builds the ``policy_latest`` metadata
    record and the ``policy_fulltext_latest`` record, and schedules an update
    of the source row's attachment info (``other_dicts``).

    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Abort the ETL with a diagnostic so the failure is traceable.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99535'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCTSC"
    zt_provider = "nynctscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}" when none) back onto the source task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省住房和城乡建设厅
def policy_jstsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for jst.sc.gov.cn (Sichuan housing dept).

    When handling page 1 it fans out tasks for the remaining list pages;
    on every page it turns each list entry into an article task.

    :param callmodel: callback context carrying the fetched HTML
        (``para_dicts['data']['1_1']['html']``), the source row
        (``sql_model``) and the redis task configuration.
    :return: a DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML('page_div',N,...) JS call.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                # sql_dict is reused across iterations; append a copy so each
                # queued row keeps its own page values.
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="box_list bd-box"]/ul/li|//ul[@class="public-list"]/li')
        base_url = 'http://jst.sc.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Some <li> entries carry no link; skip them instead of
                # failing on `'http' in None`.
                continue
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99536'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jstscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for jst.sc.gov.cn; all parsing happens in the
    ETL step, so this returns an empty DealModel."""
    return DealModel()


def policy_jstscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for jst.sc.gov.cn article pages.

    Parses title, document metadata and full text out of the fetched HTML,
    queues rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables,
    and records attachment info back onto the source task row.

    :param callmodel: context whose ``para_dicts['data']['1_1']['html']`` is
        the article HTML and whose ``sql_model.article_json`` carries the
        list-page metadata (title / url / pub_date).
    :return: an EtlDealModel with ``save_data`` rows and a before-update for
        ``other_dicts``.
    :raises Exception: when the full-text container cannot be found, leaving
        the task to be re-crawled.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[contains(@class,"content-header")]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata rows render as "label：value"; keep only the value part.
    pub_no = ''.join(res.xpath('//ul[@id="title"]//li[contains(text(),"文号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    index_no = ''.join(res.xpath('//ul[@id="title"]//li[contains(text(),"索 引 号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    subject = ''.join(res.xpath('//ul[@id="title"]//li[contains(text(),"主题分类：")]/text()').extract()).strip()
    subject = subject.split('：')[-1].strip()
    subject_word = ''.join(res.xpath('//ul[@id="title"]//li[contains(text(),"关键词：")]/text()').extract()).strip()
    subject_word = subject_word.split('：')[-1].strip()
    organ = ''.join(res.xpath('//div[@class="abstract"]//li[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()

    fulltext_xpath = '//div[@class="content-main-body"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include which selector/page failed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99536'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JSTSC"
    zt_provider = "jstscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/debug trace, kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省卫生健康委员会 (Health Commission of Sichuan Province, wsjkw.sc.gov.cn)
def policy_wsjkwsclist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for wsjkw.sc.gov.cn (Sichuan health commission).

    When handling page 1 it fans out tasks for the remaining list pages;
    on every page it turns each list entry into an article task.

    :param callmodel: callback context carrying the fetched HTML, the source
        row (``sql_model``) and the redis task configuration.
    :return: a DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in a createPageHTML('page_div',N,...) JS call.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                # sql_dict is reused across iterations; append a copy so each
                # queued row keeps its own page values.
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="wy_contMain fontSt"]/ul/li')
        base_url = 'http://wsjkw.sc.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Some <li> entries carry no link; skip them instead of
                # failing on `'http' in None`.
                continue
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99537'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_wsjkwscarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for wsjkw.sc.gov.cn; all parsing happens in the
    ETL step, so this returns an empty DealModel."""
    return DealModel()


def policy_wsjkwscarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wsjkw.sc.gov.cn article pages.

    Parses title and full text out of the fetched HTML, queues rows for the
    ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info (body plus the dedicated ``#fujian`` block) back onto the
    source task row.

    :param callmodel: context whose ``para_dicts['data']['1_1']['html']`` is
        the article HTML and whose ``sql_model.article_json`` carries the
        list-page metadata (title / url / pub_date).
    :return: an EtlDealModel with ``save_data`` rows and a before-update for
        ``other_dicts``.
    :raises Exception: when the full-text container cannot be found, leaving
        the task to be re-crawled.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: on-page headline -> <meta name="ArticleTitle"> ->
    # list-page title.
    title = ''.join(res.xpath('//div[contains(@class,"content-header")]//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="wy_contMain fontSt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include which selector/page failed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99537'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "WSJKWSC"
    zt_provider = "wsjkwscgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/debug trace, kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live inside the body or in the dedicated #fujian block.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@id="fujian"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省自贡市 (Zigong City, Sichuan Province, www.zg.gov.cn)
def policy_zglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.zg.gov.cn (Zigong City).

    When handling page 1 it fans out tasks for the remaining list pages;
    on every page it turns each list entry into an article task.

    :param callmodel: callback context carrying the fetched HTML, the source
        row (``sql_model``) and the redis task configuration.
    :return: a DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total-record count appears as ")/N。"; 20 records per page. The
        # count may contain thousands separators, e.g. "1,234".
        max_count = re.findall(r"\)/(.*?)。", para_dicts["data"]["1_1"]['html'])
        max_count = max_count[0] if max_count else "1"
        total_page = math.ceil(int(max_count.replace(',', '')) / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                # sql_dict is reused across iterations; append a copy so each
                # queued row keeps its own page values.
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@id="_cms2_WAR_CMSportlet_topArticle"]/parent::ul[1]/li')
        base_url = 'http://www.zg.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Some <li> entries carry no link; skip them instead of
                # failing on `'http' in None`.
                continue
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99539'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zgarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.zg.gov.cn; all parsing happens in the
    ETL step, so this returns an empty DealModel."""
    return DealModel()


def policy_zgarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.zg.gov.cn article pages.

    Parses title and full text out of the fetched HTML, queues rows for the
    ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info back onto the source task row.

    :param callmodel: context whose ``para_dicts['data']['1_1']['html']`` is
        the article HTML and whose ``sql_model.article_json`` carries the
        list-page metadata (title / url / pub_date).
    :return: an EtlDealModel with ``save_data`` rows and a before-update for
        ``other_dicts``.
    :raises Exception: when the full-text container cannot be found, leaving
        the task to be re-crawled.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//td[@class="content-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//td[@class="content-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include which selector/page failed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99539'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZG"
    zt_provider = "zggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/debug trace, kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省攀枝花市 (Panzhihua City, Sichuan Province, www.panzhihua.gov.cn)
def policy_panzhihualist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.panzhihua.gov.cn (Panzhihua City).

    When handling page 1 it fans out tasks for the remaining list pages;
    on every page it turns each list entry into an article task.

    :param callmodel: callback context carrying the fetched HTML, the source
        row (``sql_model``) and the redis task configuration.
    :return: a DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count appears in the pager JS as "= parseInt(N)".
        max_count = re.findall(r"= parseInt\((\d+)\)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page schedules the remaining pagination tasks.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                # sql_dict is reused across iterations; append a copy so each
                # queued row keeps its own page values.
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[contains(@class,"List_list")]/li|//div[@class="new-box"]/ul/li')
        base_url = 'http://www.panzhihua.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Some <li> entries carry no link; skip them instead of
                # failing on `'http' in None`.
                continue
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue

            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99540'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_panzhihuaarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.panzhihua.gov.cn; all parsing happens
    in the ETL step, so this returns an empty DealModel."""
    return DealModel()


def policy_panzhihuaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.panzhihua.gov.cn article pages.

    Parses title, document metadata and full text out of the fetched HTML,
    queues rows for the ``policy_latest`` / ``policy_fulltext_latest``
    tables, and records attachment info back onto the source task row.

    :param callmodel: context whose ``para_dicts['data']['1_1']['html']`` is
        the article HTML and whose ``sql_model.article_json`` carries the
        list-page metadata (title / url / pub_date).
    :return: an EtlDealModel with ``save_data`` rows and a before-update for
        ``other_dicts``.
    :raises Exception: when the full-text container cannot be found, leaving
        the task to be re-crawled.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: on-page headline (rejected when suspiciously long,
    # i.e. the xpath matched extra text) -> <meta name="ArticleTitle"> ->
    # list-page title.
    title = ''.join(res.xpath('//div[@class="mainCont"]//h1//text()').extract()).strip()
    if len(title) >= 50:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata labels have per-character spacing (e.g. "文  号"), so match on
    # the first and last character of each label and read the next <span>.
    pub_no = ''.join(res.xpath('//div[@class="topboxs"]//strong[contains(text(),"文") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="topboxs"]//strong[contains(text(),"索") and contains(text(),"号")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="topboxs"]//strong[contains(text(),"成") and contains(text(),"期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="topboxs"]//strong[contains(text(),"发") and contains(text(),"构")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市..." organ names with the city name.
        organ = '攀枝花' + organ

    fulltext_xpath = '//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include which selector/page failed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99540'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PANZHIHUA"
    zt_provider = "panzhihuagovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/debug trace, kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省泸州市 (Luzhou City, Sichuan Province, www.luzhou.gov.cn)
def policy_luzhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for www.luzhou.gov.cn (Luzhou City).

    Schedules the remaining list pages when handling page 1, and turns every
    list entry into an article task for the next stage.

    :param callmodel: callback context carrying the fetched HTML, the source
        row (``sql_model``) and the redis task configuration.
    :return: a DealModel with pagination inserts (``befor_dicts``) and
        article-task inserts (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    # Total page count appears as "/共N页" in the pager.
    found = re.findall("/共(\d+)页", page_html)
    total_page = int(found[0]) if found else 1
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Page 1 is responsible for queueing pages 2..total_page.
        pagination = DealInsertModel()
        pagination.insert_pre = CoreSqlValue.insert_ig_it
        row = deal_sql_dict(callmodel.sql_model.dict())
        list_json = json.loads(callmodel.sql_model.list_json)  # sanity-parse only
        for page in range(page_index + 1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page
            row["list_json"] = callmodel.sql_model.list_json
            # `row` is mutated in place; store a snapshot per page.
            pagination.lists.append(row.copy())
        result.befor_dicts.insert.append(pagination)

    articles = DealInsertModel()
    articles.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=page_html)
    base_url = 'http://www.luzhou.gov.cn'
    for entry in selector.xpath('//ul[@class="newsList"]/li'):
        href = entry.xpath('a[last()]/@href').extract_first()
        if not href:
            continue
        url = href if 'http' in href else base_url + href
        task = base_info.copy()
        task["task_tag"] = task.pop("task_tag_next")
        # rawid is the last path segment of the article URL.
        task["rawid"] = url.split('/')[-1]
        task["sub_db_id"] = '99541'
        meta = {"url": url,
                "title": entry.xpath('a[last()]/text()').extract_first().strip(),
                "pub_date": entry.xpath('span/text()').extract_first().strip()}
        task["article_json"] = json.dumps(meta, ensure_ascii=False)
        articles.lists.append(task)
    result.next_dicts.insert.append(articles)

    return result


def policy_luzhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.luzhou.gov.cn; all parsing happens in
    the ETL step, so this returns an empty DealModel."""
    return DealModel()


def policy_luzhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.luzhou.gov.cn article pages.

    Parses title, publish date and full text out of the fetched HTML, queues
    rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables, and
    records attachment info back onto the source task row.

    :param callmodel: context whose ``para_dicts['data']['1_1']['html']`` is
        the article HTML and whose ``sql_model.article_json`` carries the
        list-page metadata (title / url / pub_date).
    :return: an EtlDealModel with ``save_data`` rows and a before-update for
        ``other_dicts``.
    :raises Exception: when the full-text container cannot be found, leaving
        the task to be re-crawled.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Prefer the on-page publish time; fall back to the list-page date.
    # (The original computed pub_year before the fallback and then again
    # inside it; compute it once after the date is settled.)
    pub_date_info = res.xpath('//span[contains(text(),"发布时间：")]/text()').extract_first()
    pub_date = clean_pubdate(pub_date_info)
    if not pub_date:
        pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]

    fulltext_xpath = '//div[@class="conTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Was a bare `raise Exception`; include which selector/page failed.
        raise Exception(f"fulltext not found via {fulltext_xpath}: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99541'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LUZHOU"
    zt_provider = "luzhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress/debug trace, kept from original

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachments found inside the full-text container on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省德阳市 (Deyang City, Sichuan Province)
def policy_deyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(">共(\d+)页<", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": f"{list_json['page_info']}_{page}"}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'sfgw/c109344' == callmodel.sql_model.list_rawid:
        li_list = res.xpath('//div[@id="list_content"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            # base_url = f'http://www.psx.gov.cn/{callmodel.sql_model.list_rawid}.html'
            base_url = f'http://www.deyang.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99542'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_deyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Deyang: nothing extra to schedule here,
    so return an empty DealModel (field extraction happens in the ETL step)."""
    return DealModel()


def policy_deyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Deyang article pages.

    Extracts title / pub_no / index_no / organ and the full text from the
    fetched HTML, builds policy_latest and policy_fulltext_latest rows,
    and stores attachment info into other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@id="headline"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@id="symbol"]//span[contains(text(),"文") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@id="symbol"]//span[contains(text(),"索") and contains(text(),"号")]/parent::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@id="symbol"]//span[contains(text(),"信") and contains(text(),"源")]/parent::div[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the city: "市XX局" -> "德阳市XX局".
        organ = '德阳' + organ

    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99542'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DEYANG"
    zt_provider = "deyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit in the body or in the metadata table.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, '(//div[@id="symbol"])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省绵阳市
def policy_mylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Mianyang (Sichuan) gov policy site.

    On page 1, fans out fetch tasks for the remaining list pages; on every
    call, extracts the article links into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is exposed by the pager script, e.g. "pageCount:12".
        max_count = re.findall(r"pageCount:(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'column' in callmodel.sql_model.list_rawid:
                    # "column" style lists paginate via a query parameter.
                    dic = {"page_info": f"pageIndex={page}"}
                    sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                else:
                    sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="article-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article stage runs under task_tag_next.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'http://www.my.gov.cn/{callmodel.sql_model.list_rawid}pageIndex=1'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99543'
            article_json = {
                "url": url,
                "title": li.xpath('a/span/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_myarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Mianyang: no extra scheduling is needed,
    so return an empty DealModel (extraction happens in the ETL step)."""
    return DealModel()


def policy_myarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Mianyang article pages.

    Extracts the title and the full text from the fetched HTML, builds
    policy_latest and policy_fulltext_latest rows, and stores attachment
    info into other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallback chain: page headline -> meta tag -> list-page title.
    title = ''.join(res.xpath('//div[@class="title"]//h1//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99543'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MY"
    zt_provider = "mygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省广元市
def policy_cngylist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Guangyuan (Sichuan) gov policy site.

    The list endpoint returns JSON. On page 1, fans out fetch tasks for
    every page; on every call, turns each JSON item into a next-stage
    article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['Data']['TotalPages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['Data']['Items']
        for li in li_list:
            temp = info_dicts.copy()
            # The article stage runs under task_tag_next.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li['LinkUrl']
            base_url = 'https://www.cngy.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99544'
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['PubTime'],
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cngylist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Alternate list-page callback for Guangyuan (HTML list layout).

    On page 1, fans out fetch tasks for the remaining list pages; on every
    call, extracts article links into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is embedded in the pager markup, e.g. "pageIndex>12".
        max_count = re.findall(r"pageIndex>(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="id-list id-list-page"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article stage runs under task_tag_next.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Guard: some <li> carry no link; without this, the
                # membership test below raises TypeError on None.
                continue
            base_url = 'https://www.cngy.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99544'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cngyarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Guangyuan: no extra scheduling is needed,
    so return an empty DealModel (extraction happens in the ETL step)."""
    return DealModel()


def policy_cngyarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Guangyuan article pages.

    Extracts title, document numbers, subject words, dates, legal status,
    organ and the full text from the fetched HTML, builds policy_latest
    and policy_fulltext_latest rows, and stores attachment info into
    other_dicts.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    # Normalize once up front so pub_year is derived from the cleaned
    # date (siblings do the same; the original sliced the raw string).
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="biaoti"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    pub_no = ''.join(res.xpath('//table//td[contains(text(),"文号")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table//td[contains(text(),"索")]/following::td[1]//text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table//td[contains(text(),"主题词")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table//td[contains(text(),"成文时间")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table//td[contains(text(),"有效性")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table//td[contains(text(),"发布机构")]/following::td[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the city: "市XX局" -> "广元市XX局".
        organ = '广元' + organ

    fulltext_xpath = '//div[@class="xxbt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99544'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CNGY"
    zt_provider = "cngygovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Collect attachment links from the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省遂宁市
def policy_suininglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Suining (Sichuan) gov policy site.

    Derives the page count from the search-result total (20 items per
    page). On page 1, fans out fetch tasks for the remaining pages; on
    every call, extracts article links into next-stage tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The result total appears as "search-results.../1,234"; it may
        # contain thousands separators, hence the replace below.
        max_count = re.findall(r"search-results.*?/(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = max_count[0] if max_count else "1"
        total_page = math.ceil(int(max_count.replace(',', '')) / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="portlet-body"]/div/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            # The article stage runs under task_tag_next.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = 'https://www.suining.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99545'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_suiningarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Suining: no extra scheduling is needed,
    so return an empty DealModel (extraction happens in the ETL step)."""
    return DealModel()


def policy_suiningarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Suining article pages.

    Extracts title / pub_no / subject / organ and the full text from the
    fetched HTML, builds policy_latest and policy_fulltext_latest rows,
    and stores attachment info into other_dicts.

    Raises:
        Exception: when no publication date or no full-text container
            can be determined.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//td[@class="content-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Fall back to the on-page publish time when the list date is
    # missing or obviously bogus ("0000"-padded).
    if not pub_date or '0000' in pub_date:
        pub_date_info = ''.join(res.xpath('//strong[contains(text(),"发布时间：")]/following::span[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception(f"pub_date not found: {provider_url}")
    pub_no = ''.join(res.xpath('//ul[@class="jouornal-meta"]//span[contains(text(),"文号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[@class="jouornal-meta"]//span[contains(text(),"主题分类")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[@class="jouornal-meta"]//span[contains(text(),"机构：")]/parent::li[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # The site abbreviates the city: "市XX局" -> "遂宁市XX局".
        organ = '遂宁' + organ

    fulltext_xpath = '//td[@class="content-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99545'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "SUINING"
    zt_provider = "suininggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    # pub_date is already normalized by clean_pubdate above.
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit in the body or in the dedicated attach cell.
    file_info = (get_file_info(data, res, f'({fulltext_xpath})')
                 + get_file_info(data, res, '(//td[@class="content-attach"])'))
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省内江市
def policy_neijianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Neijiang (Sichuan) gov policy site.

    The list endpoint returns JSON. On page 1, fans out fetch tasks for
    every page; on every call, turns each JSON item into a next-stage
    article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['results']
        for li in li_list:
            temp = info_dicts.copy()
            # The article stage runs under task_tag_next.
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            href = li['url']
            base_url = 'https://www.neijiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99546'
            article_json = {
                "url": url,
                "title": li['title'],
                "pub_date": li['publishedTime'],
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neijianglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for static Neijiang list pages paged via ``createPageHTML``.

    On the first page, the total page count is read from the
    ``createPageHTML('page-div', N ...)`` call embedded in the HTML and the
    remaining list pages are scheduled.  Every article link on the current
    page is then queued for the article stage.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: the old non-raw pattern relied on deprecated escapes.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only the first page fans out the remaining list pages.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li|//ul[@class="wdhlist"]/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div[@class="coninfo"]/div/a/@href|a/@href').extract_first()
            if not href:
                # Some list items carry no link; skip instead of crashing on None.
                continue
            base_url = 'https://www.neijiang.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue

            # rawid is the file name without its extension, e.g. ".../123456.shtml" -> "123456".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99546'
            article_json["url"] = url
            article_json["title"] = (li.xpath('div[@class="coninfo"]/div/a/text()|a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('div[@class="timebox"]/span[@class="time"]/text()|span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_neijiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Neijiang: returns an empty DealModel.

    The fetched page is parsed later by ``policy_neijiangarticle_etl_callback``.
    """
    return DealModel()


def policy_neijiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Neijiang article pages: extracts metadata and full text.

    Metadata fields come from the ``topmeta`` label/value spans; the title
    falls back to the value captured at list time when the detail page has no
    title node.  Raises when the full-text container cannot be located so the
    record is retried rather than saved empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//h1[@class="data-title"]//text()|//div[@class="con_left"]//h2//text()').extract()).strip()
    if not title:
        # Fall back to the title harvested on the list page.
        title = article_json['title'].strip()
    # Each metadata value is the first <span> following its label <span>.
    pub_no = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"文") and contains(text(),"号")]/following::span[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"索") and contains(text(),"号")]/following::span[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"主") and contains(text(),"类")]/following::span[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"成") and contains(text(),"期")]/following::span[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"有") and contains(text(),"性")]/following::span[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"发") and contains(text(),"构")]/following::span[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix the city name so bare "市..." organs are unambiguous.
        organ = '内江' + organ

    fulltext_xpath = '//div[@class="con_left"]|//div[@id="NewsContent"]|//div[@class="gknbxq_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried instead of persisting an empty body.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99546'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NEIJIANG"
    zt_provider = "neijianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # NOTE(review): get_file_info appears to collect attachment links from the
    # given containers — confirm; the result is stored back on the task row.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, '(//div[@class="download"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Leshan City, Sichuan Province
def policy_leshanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Leshan government JSON API.

    Page 1 schedules every page reported by the API (``data.totalPage``);
    each record becomes an article task whose URL is built from the record
    id.  ``pubTime`` arrives as epoch milliseconds and is converted to a
    local ``%Y-%m-%d %H:%M:%S`` string.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['totalPage']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The request body is identical for every page; only page_index differs.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['dataList']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['id']
            url = f'https://www.leshan.gov.cn/apistatic/lsswszf/xxgkcontent/zwgk_content_{href}.shtml?id={href}'
            temp["rawid"] = href
            temp["sub_db_id"] = '99547'
            article_json["url"] = url
            article_json["title"] = li['title']
            # pubTime is epoch milliseconds.
            pub_ts = li['pubTime'] / 1000
            article_json["pub_date"] = datetime.datetime.fromtimestamp(pub_ts).strftime("%Y-%m-%d %H:%M:%S")
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_leshanlist2_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Second Leshan JSON list callback; same flow as ``policy_leshanlist_callback``.

    Page 1 fans out the remaining pages reported by ``data.totalPage``; each
    JSON record is queued as an article task keyed by its id.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    body = json.loads(para_dicts["data"]["1_1"]['html'])
    total_page = body['data']['totalPage']
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules the rest.
        row = deal_sql_dict(callmodel.sql_model.dict())
        bef_insert = DealInsertModel()
        bef_insert.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page_no in range(1, total_page + 1):
            row["page"] = total_page
            row["page_index"] = page_no
            row["list_json"] = callmodel.sql_model.list_json
            bef_insert.lists.append(row.copy())
        result.befor_dicts.insert.append(bef_insert)

    next_insert = DealInsertModel()
    next_insert.insert_pre = CoreSqlValue.insert_ig_it
    for record in body['data']['dataList']:
        task = info_dicts.copy()
        task["task_tag"] = task.pop("task_tag_next")
        doc_id = record['id']
        detail = {
            "url": f'https://www.leshan.gov.cn/apistatic/lsswszf/xxgkcontent/zwgk_content_{doc_id}.shtml?id={doc_id}',
            "title": record['title'],
        }
        # pubTime arrives as epoch milliseconds.
        stamp = datetime.datetime.fromtimestamp(record['pubTime'] / 1000)
        detail["pub_date"] = datetime.datetime.strftime(stamp, "%Y-%m-%d %H:%M:%S")
        task["rawid"] = doc_id
        task["sub_db_id"] = '99547'
        task["article_json"] = json.dumps(detail, ensure_ascii=False)
        next_insert.lists.append(task)
    result.next_dicts.insert.append(next_insert)

    return result


def policy_leshanlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for static Leshan list pages paged via ``createPageHTML``.

    The ``lsswszf/gfxwj`` column links into the JSON content API, so its
    items are routed to the 'leshanarticle1' task tag and keyed by the
    ``id`` query parameter; every other column follows the static-page
    article flow keyed by the file name.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: the old non-raw pattern relied on deprecated escapes.
        max_count = re.findall(r"createPageHTML\('page_div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@id="content_list"]/li|//ul[@id="gfxwj"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link instead of raising on None.
                continue

            if 'lsswszf/gfxwj' == callmodel.sql_model.list_rawid:
                base_url = 'https://www.leshan.gov.cn'
                url = href if 'http' in href else base_url + href
                # These items are parsed by the JSON-API article flow; key by ?id=.
                rawid = url.split('id=')[-1].strip()
                temp["task_tag"] = 'leshanarticle1'
            else:
                base_url = f'https://www.leshan.gov.cn/{callmodel.sql_model.list_rawid}/list2.shtml'
                url = parse.urljoin(base_url, href)
                # rawid = file name without extension, e.g. ".../123456.html" -> "123456".
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            if 'htm' not in url:
                continue
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99547'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/@title').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/em/text()|span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_leshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Leshan static pages: returns an empty DealModel.

    The fetched page is parsed later by ``policy_leshanarticle_etl_callback``.
    """
    return DealModel()


def policy_leshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Leshan static article pages (``zoomtitl``/``zoomcon`` template).

    Only title and full text are available on this template, so the
    pub_no/organ/... fields are left at the defaults produced by
    ``init_data``.  Raises when the full-text container is missing so the
    record is retried rather than saved empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@id="zoomtitl"]//text()').extract()).strip()
    if not title:
        # Fall back to the title harvested on the list page.
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="zoomcon"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried instead of persisting an empty body.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99547'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LESHAN"
    zt_provider = "leshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # NOTE(review): get_file_info appears to collect attachment links from the
    # full-text container — confirm; the result is stored back on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_leshanarticle1_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Leshan JSON-API pages: returns an empty DealModel.

    The fetched payload is parsed later by ``policy_leshanarticle1_etl_callback``.
    """
    return DealModel()


def policy_leshanarticle1_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Leshan normative documents served by the JSON content API.

    Metadata comes from ``data.publicDocument``; the HTML fragment in
    ``data.template01.content`` is stored verbatim as the full text.
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    html = html_json['data']['template01']['content']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    doc = html_json['data']['publicDocument']
    title = doc['title']
    if not title:
        # Fall back to the title harvested on the list page.
        title = article_json['title'].strip()
    pub_no = doc['code']
    index_no = doc['callNumber']
    subject = doc['chanPath']
    # NOTE(review): status '1' is mapped to "有效" (in force) and anything else
    # to "失效" (expired) — confirm against the API's status vocabulary.
    legal_status = '有效' if doc['status'] == '1' else '失效'
    organ = doc['responsibleDepartment']
    if organ.startswith('市'):
        # Prefix the city name so bare "市..." organs are unambiguous.
        organ = '乐山' + organ

    # The API already returns the article body as HTML; store it as-is.
    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99547'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LESHAN"
    zt_provider = "leshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # NOTE(review): get_file_info appears to collect attachment links from the
    # whole body fragment — confirm; the result is stored back on the task row.
    file_info = get_file_info(data, res, '(//body)')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Nanchong City, Sichuan Province
def policy_nanchonglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Nanchong government JSON document API.

    Page 1 schedules every page reported by ``data.pager.pageCount``; each
    record becomes an article task whose URL comes from ``doc_pub_url``
    (made absolute against the site root when relative).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['pager']['pageCount']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The request body is identical for every page; only page_index differs.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for li in html_json['data']['list']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['doc_pub_url']
            base_url = 'http://www.nanchong.gov.cn'
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid = file name without extension, e.g. ".../123456.html" -> "123456".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99548'
            article_json["url"] = url
            # NOTE(review): 'f_2022623946335' appears to be the CMS field that
            # holds the title — confirm against the API payload.
            article_json["title"] = li['f_2022623946335']
            article_json["pub_date"] = li['save_time']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nanchonglist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for static Nanchong list pages paged via ``createPage(N)``.

    Unlike the 1-based JSON list callbacks, paging here is 0-based:
    ``page_index == 0`` fans out pages ``1 .. total_page - 1``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string: the old non-raw pattern relied on deprecated escapes.
        max_count = re.findall(r"createPage\((\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="xxgk-list xxgk-list2"]/ul/li|//ul[@class="list-ul"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip entries without a link instead of raising on None.
                continue
            base_url = f'http://www.nanchong.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue

            # rawid = file name without extension, e.g. ".../123456.html" -> "123456".
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99548'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nanchongarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article callback for Nanchong: returns an empty DealModel.

    The fetched page is parsed later by ``policy_nanchongarticle_etl_callback``.
    """
    return DealModel()


def policy_nanchongarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Nanchong article pages: extracts metadata and full text.

    Metadata fields come from the ``xl-head`` label/value divs; the title
    falls back to the value captured at list time when the detail page has no
    ``xl-title`` node.  Raises when the full-text container cannot be located
    so the record is retried rather than saved empty.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    title = ''.join(res.xpath('//div[@class="xl-title"]//text()').extract()).strip()
    if not title:
        # Fall back to the title harvested on the list page.
        title = article_json['title'].strip()
    # Each metadata value is the first <div> following its label <div>.
    pub_no = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"文号")]/following::div[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"索引号")]/following::div[1]//text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"主题分类")]/following::div[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"成文日期")]/following::div[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"有效性")]/following::div[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-head"]//div[contains(text(),"发布机构")]/following::div[1]//text()').extract()).strip()
    if organ.startswith('市'):
        # Prefix the city name so bare "市..." organs are unambiguous.
        organ = '南充' + organ

    fulltext_xpath = '//div[@class="xl-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task is retried instead of persisting an empty body.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99548'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NANCHONG"
    zt_provider = "nanchonggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # NOTE(review): get_file_info appears to collect attachment links from the
    # full-text container — confirm; the result is stored back on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Meishan City, Sichuan Province
def policy_mslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(";&nbsp;.*?/(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zfxxgk/fdzdgknr/gsgg' == callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="list-con"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.ms.gov.cn/{callmodel.sql_model.list_rawid}.htm'
                # base_url = f'http://www.deyang.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99549'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zfxxgk/fdzdgknr/gsgg' == callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[contains(@class,"govnewslist")]/table/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://www.ms.gov.cn/{callmodel.sql_model.list_rawid}.htm'
                # base_url = f'http://www.deyang.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99549'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[4]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[contains(@class,"govnewslist")]/table/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[1]/a/@href').extract_first()
                base_url = f'http://www.ms.gov.cn/{callmodel.sql_model.list_rawid}.htm'
                # base_url = f'http://www.deyang.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99549'
                article_json["url"] = url
                if 'zfxxgk/z__c/gfxwj' == callmodel.sql_model.list_rawid:
                    article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('td[5]/text()').extract_first().strip()
                else:
                    article_json["title"] = li.xpath('td[1]/a/text()').extract_first().strip()
                    article_json["pub_date"] = li.xpath('td[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_msarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Meishan: no per-fetch processing is needed
    here, so an empty DealModel is returned (parsing happens in the ETL step)."""
    return DealModel()


def policy_msarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Meishan (眉山) article pages.

    Extracts title/metadata/fulltext from the fetched HTML, builds the
    `policy_latest` and `policy_fulltext_latest` rows, and records any
    attachment info back onto the task row via `other_dicts`.

    :raises Exception: when the fulltext container is missing (so the task
        is treated as failed rather than saved empty).
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Fallback metadata captured at list time (title/url/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//td[contains(@class,"titlestyle")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells are located by their Chinese labels (document number,
    # index number, issuing organ) and read from the following <td>.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号")]/following::td[1]/text()').extract()).strip()
    # written_date = ''.join(res.xpath('//div[@class="topboxs"]//strong[contains(text(),"成") and contains(text(),"期")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发") and contains(text(),"构")]/following::td[1]/text()').extract()).strip()
    # Organ names starting with "市" are relative; prefix the city name.
    if organ.startswith('市'):
        organ = '眉山' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99549'
    rawid = callmodel.sql_model.rawid
    # Globally unique record id derived from sub-database id + raw id.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "MS"
    zt_provider = "msgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext block are written back to the
    # originating task row as JSON in other_dicts ("{}" when none).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省宜宾市
def policy_yibinlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yibin (宜宾) municipal policy pages.

    Two listing modes, selected by ``list_rawid``: a JSON search API
    ('search/list', paginated via totalPages/pageNumber) and static HTML
    pages (paginated via createPageHTML and index_N.html).  On the first
    page the remaining pagination tasks are queued; each row then becomes
    an article-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for downstream task rows; task_tag_next replaces task_tag.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        if 'search/list' in callmodel.sql_model.list_rawid:
            # JSON API mode: page count comes straight from the payload.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            total_page = html_json['page']['totalPages']
        else:
            # HTML mode: page count is embedded in a createPageHTML() call.
            max_count = re.findall("createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
            max_count = int(max_count[0]) if max_count else 1
            total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: schedule the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'search/list' in callmodel.sql_model.list_rawid:
                    # API pages are 1-based, hence page + 1.
                    dic = {"page_info": f"pageNumber={page+1}"}
                    sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                else:
                    # Static pages: page 0 is index.html, page N is index_N.html.
                    dic = {"page_info": f"index_{page}.html"}
                    sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'search/list' in callmodel.sql_model.list_rawid:
            # JSON API mode: rows carry absolute URL, title and pub date.
            html_json = json.loads(para_dicts["data"]["1_1"]['html'])
            li_list = html_json['page']['content']
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                url = li['DOCPUBURL']
                if 'htm' not in url:
                    continue
                # rawid = document file name without its extension.
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99550'
                article_json["url"] = url
                article_json["title"] = li['DOCTITLE']
                article_json["pub_date"] = li['PUBDATE']
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # HTML mode: two list layouts share the same row structure.
            li_list = res.xpath('//ul[@class="gl-list-l"]/li|//div[@class="xxgk-list"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.yibin.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                # base_url = f'http://www.deyang.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99550'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yibinarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Yibin: nothing to do at fetch time, so an
    empty DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_yibinarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yibin (宜宾) article pages.

    Reads the metadata table (`xxgk-xl-table`) cell by cell via their
    Chinese labels, extracts the fulltext, builds the `policy_latest` /
    `policy_fulltext_latest` rows, and stores attachment info from both the
    fulltext block and the metadata table back onto the task row.

    :raises Exception: when the fulltext container is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Fallback metadata captured at list time (title/url/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the metadata-table title; fall back to the list-page title.
    title = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"标") and contains(text(),"题：")]/following::td[1]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Labelled metadata cells: document number, index number, written date,
    # legal status ("有效性"), issuing organ.
    pub_no = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"文") and contains(text(),"号：")]/following::td[1]//text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"索") and contains(text(),"号：")]/following::td[1]//text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"成") and contains(text(),"期：")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"有") and contains(text(),"性：")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"发") and contains(text(),"构：")]/following::td[1]//text()').extract()).strip()
    # Organ names starting with "市" are relative; prefix the city name.
    if organ.startswith('市'):
        organ = '宜宾' + organ

    fulltext_xpath = '//div[@class="xl-article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99550'
    rawid = callmodel.sql_model.rawid
    # Globally unique record id derived from sub-database id + raw id.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YIBIN"
    zt_provider = "yibingovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may sit in the article body or in the metadata table;
    # collect both and write them to other_dicts ("{}" when none).
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//table[@class="xxgk-xl-table"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省广安市
def policy_guanganlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Guang'an (广安) municipal policy pages.

    On the first page (page_index == 1) the remaining pagination pages are
    queued; every list row is then converted into an article-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for downstream task rows; task_tag_next replaces task_tag.
    base_task = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    page_html = para_dicts["data"]["1_1"]['html']
    # Total page count sits inside a createPageHTML(...) JS call; the
    # container id differs between templates ('page_div' vs 'page-div').
    found = re.findall("createPageHTML\('page_div',(\d+)", page_html)
    if not found:
        found = re.findall("createPageHTML\('page-div',(\d+)", page_html)
    total_page = int(found[0]) if found else 1

    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # First page only: schedule the remaining list pages.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(page_index + 1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            dic = {"page_info": f"{list_json['page_info']}_{page}"}
            sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    res = Selector(text=page_html)
    # Two list layouts share the same row structure (anchor + date span).
    for li in res.xpath('//ul[@class="list3"]/li|//div[@class="gk_list"]/dl/dd'):
        href = li.xpath('a/@href').extract_first()
        url = parse.urljoin(
            f'http://www.guang-an.gov.cn/{callmodel.sql_model.list_rawid}/index.html',
            href)
        if 'htm' not in url:
            continue
        task = base_task.copy()
        task["task_tag"] = task.pop("task_tag_next")
        # rawid = document file name without its extension.
        task["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        task["sub_db_id"] = '99551'
        article_json = {
            "url": url,
            "title": li.xpath('a/text()').extract_first().strip(),
            "pub_date": li.xpath('span[@class="fr"]/text()').extract_first().strip(),
        }
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_guanganarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Guang'an: nothing to do at fetch time, so
    an empty DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_guanganarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Guang'an (广安) article pages.

    Pulls metadata from the `zfxxgk` list items (label <span> inside each
    <li>), extracts the fulltext, builds the `policy_latest` /
    `policy_fulltext_latest` rows, and stores attachment info back onto the
    task row.

    :raises Exception: when the fulltext container is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Fallback metadata captured at list time (title/url/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//td[@class="content1"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Labelled metadata items: document number, index number, subject
    # classification, legal status, issuing organ.
    pub_no = ''.join(res.xpath('//ul[contains(@class,"zfxxgk")]//span[contains(text(),"文号")]/parent::li[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//ul[contains(@class,"zfxxgk")]//span[contains(text(),"索引号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//ul[contains(@class,"zfxxgk")]//span[contains(text(),"主题分类")]/parent::li[1]/text()').extract()).strip()
    # written_date = ''.join(res.xpath('//table[@class="xxgk-xl-table"]//th[contains(text(),"成") and contains(text(),"期：")]/following::td[1]//text()').extract()).strip()
    legal_status = ''.join(res.xpath('//ul[contains(@class,"zfxxgk")]//span[contains(text(),"有效性")]/parent::li[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//ul[contains(@class,"zfxxgk")]//span[contains(text(),"发布机构")]/parent::li[1]/text()').extract()).strip()
    # Organ names starting with "市" are relative; prefix the city name.
    if organ.startswith('市'):
        organ = '广安' + organ

    fulltext_xpath = '//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99551'
    rawid = callmodel.sql_model.rawid
    # Globally unique record id derived from sub-database id + raw id.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GUANGAN"
    zt_provider = "guangangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject
    # data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext block are written back to the
    # originating task row as JSON in other_dicts ("{}" when none).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省达州市
def policy_dazhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dazhou (达州) JSON list endpoints.

    The endpoint returns {'total': <item count>, 'data': [rows]}, fifteen
    items per page.  On the first page the remaining pagination tasks are
    queued; every row is then turned into an article-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for downstream task rows; task_tag_next replaces task_tag.
    base_task = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in para_dicts["data"]:
        return result

    html_json = json.loads(para_dicts["data"]["1_1"]['html'])
    # 15 items per page.
    total_page = math.ceil(html_json['total'] / 15)
    if int(callmodel.sql_model.page_index) == 1:
        # First page only: schedule every list page.
        sql_dict = deal_sql_dict(callmodel.sql_model.dict())
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        list_json = json.loads(callmodel.sql_model.list_json)
        for page in range(1, total_page + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            # page info is carried unchanged; the fetcher derives the
            # actual page from page_index.
            sql_dict["list_json"] = callmodel.sql_model.list_json
            di_model_bef.lists.append(sql_dict.copy())
        result.befor_dicts.insert.append(di_model_bef)

    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    for row in html_json['data']:
        href = row['url']
        # Absolute links are kept as-is; relative ones get the site root.
        url = href if 'http' in href else 'http://www.dazhou.gov.cn' + href
        if 'htm' not in url:
            continue
        task = base_task.copy()
        task["task_tag"] = task.pop("task_tag_next")
        # rawid = document file name without its extension.
        task["rawid"] = re.findall('(.*?)\.', url.split('/')[-1])[0]
        task["sub_db_id"] = '99552'
        article_json = {"url": url,
                        "title": row['title'],
                        "pub_date": row['inputtime']}
        task["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(task)
    result.next_dicts.insert.append(di_model_next)

    return result


def policy_dazhoulist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dazhou (达州) endpoints returning ten items
    per page.

    The endpoint normally returns JSON ({'total': ..., 'data': [rows]});
    some responses use Python-literal syntax (single quotes) and need the
    literal_eval fallback.  On the first page the remaining pagination
    tasks are queued; every row is then turned into an article-stage task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Template for downstream task rows; task_tag_next replaces task_tag.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        raw = para_dicts["data"]["1_1"]['html']
        try:
            html_json = json.loads(raw)
        except ValueError:
            # json.JSONDecodeError is a ValueError subclass, so this covers
            # malformed-JSON payloads only (the former bare except hid all
            # errors).  ast.literal_eval parses Python-literal responses
            # safely, unlike the previous eval(), which would execute
            # arbitrary expressions from an untrusted HTTP response.
            import ast
            html_json = ast.literal_eval(raw)
        tcount = html_json['total']
        total_page = math.ceil(int(tcount) / 10)  # 10 items per page
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule every list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # page info is carried unchanged; the fetcher derives the
                # actual page from page_index.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        li_list = html_json['data']
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li['url']
            base_url = 'http://www.dazhou.gov.cn'
            # Absolute links are kept as-is; relative ones get the site root.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid = document file name without its extension.
            rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99552'
            article_json["url"] = url
            article_json["title"] = li['title']
            article_json["pub_date"] = li['inputtime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dazhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dazhou: nothing to do at fetch time, so an
    empty DealModel is returned (extraction happens in the ETL step)."""
    return DealModel()


def policy_dazhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dazhou (达州) article pages.

    Extracts title/metadata/fulltext from the fetched HTML, builds the
    `policy_latest` and `policy_fulltext_latest` rows, and records any
    attachment info back onto the task row via `other_dicts`.

    :raises Exception: when the fulltext container is missing.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    # Fallback metadata captured at list time (title/url/pub_date).
    article_json = json.loads(callmodel.sql_model.article_json)
    title = article_json['title']
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)
    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//h2[@id="ct"]//text()|//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Overlong extraction usually means the xpath grabbed surrounding text;
    # use the ArticleTitle <meta> tag instead.
    if len(title) >= 50:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    # Labelled header fields: document number and source/organ.
    pub_no = ''.join(res.xpath('//div[@class="show_title_box"]//strong[contains(text(),"文") and contains(text(),"号：")]/following::td[1]//text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="show_title_box"]//strong[contains(text(),"来") and contains(text(),"源：")]/following::td[1]//text()').extract()).strip()
    # Organ names starting with "市" are relative; prefix the city name.
    if organ.startswith('市'):
        organ = '达州' + organ

    # Two page templates: class show_content or div#xxbt.
    fulltext_xpath = '//div[contains(@class,"show_content")]|//div[@id="xxbt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99552'
    rawid = callmodel.sql_model.rawid
    # Globally unique record id derived from sub-database id + raw id.
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DAZHOU"
    zt_provider = "dazhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # debug trace of the generated record id

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    # data['index_no'] = index_no
    # data['written_date'] = clean_pubdate(written_date)
    # data['impl_date'] = clean_pubdate(impl_date)
    # data['invalid_date'] = clean_pubdate(invalid_date)
    # data['subject'] = subject
    # data['subject_word'] = subject_word
    # data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links inside the fulltext block are written back to the
    # originating task row as JSON in other_dicts ("{}" when none).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省雅安市
def policy_yaanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ya'an (雅安) municipal policy pages.

    Parses the list HTML in ``para_dicts["data"]["1_1"]``; on page 1 it
    schedules tasks for every remaining list page, and in all cases emits
    one article-stage task per list entry.

    :param callmodel: platform callback context carrying the fetched HTML
        and the originating SQL row.
    :return: DealModel with scheduled page tasks and next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count is embedded in the pager markup, e.g. ">共12页<".
        max_count = re.findall(r">共(\d+)页<", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page: fan out one task per remaining list page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="content-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip anchor-less entries ('http' in None would raise).
                continue
            base_url = 'https://www.yaan.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99553'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yaanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ya'an; parsing is deferred to the ETL
    step, so this returns an empty DealModel unchanged."""
    return DealModel()


def policy_yaanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ya'an (雅安) policy article pages.

    Extracts metadata and full text from the fetched HTML, stages rows for
    the ``policy_latest`` / ``policy_fulltext_latest`` tables, and records
    attachment info (``other_dicts``) back onto the originating SQL row.

    :param callmodel: platform callback context with the article HTML and
        the list-stage ``article_json`` payload.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="msg-content fr"]//h1//text()|//h1[@class="xq-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"文") and contains(text(),"号：")]/following::span[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"索") and contains(text(),"号：")]/following::span[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"成") and contains(text(),"期：")]/following::span[1]/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"有") and contains(text(),"性：")]/following::span[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="topmeta"]//span[contains(text(),"发") and contains(text(),"构：")]/following::span[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # City-level organ names omit the city; prefix "雅安" to disambiguate.
        organ = '雅安' + organ

    fulltext_xpath = '//div[@class="xqing-web-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('yaan article fulltext not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99553'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YAAN"
    zt_provider = "yaangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments may appear both inside the body and in a dedicated file box.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="xqing-web-file"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省巴中市
def policy_cnbzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Bazhong (巴中) municipal policy pages.

    On page 1 it schedules tasks for every remaining list page; in all
    cases it emits one article-stage task per list entry found in
    ``para_dicts["data"]["1_1"]``.

    :param callmodel: platform callback context carrying the fetched HTML
        and the originating SQL row.
    :return: DealModel with scheduled page tasks and next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count is exposed by the pager script as "pageCount:<n>".
        max_count = re.findall(r"pageCount:(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Two list layouts plus the xxgk nav layout share this callback.
        li_list = res.xpath('//div[@class="listnews"]/ul/li|//div[@class="listnews zcjd-listnews"]/ul/li|//div[@class="xxgk_nav_con"]/div')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('ul/li[@class="mc"]//a/@href|a[1]/@href').extract_first()
            if not href:
                continue
            base_url = 'http://www.cnbz.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99554'
            article_json["url"] = url
            article_json["title"] = li.xpath('ul/li[@class="mc"]/div/a/text()|a[1]/span/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('ul/li[@class="rq"]/text()|span[@class="right date"]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cnbzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Bazhong; parsing is deferred to the ETL
    step, so this returns an empty DealModel unchanged."""
    return DealModel()


def policy_cnbzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Bazhong (巴中) policy article pages.

    Extracts metadata and full text from the fetched HTML, stages rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and records attachment
    info (``other_dicts``) back onto the originating SQL row.

    :param callmodel: platform callback context with the article HTML and
        the list-stage ``article_json`` payload.
    :raises Exception: when no pub_date is available or the full-text
        container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # On-page headline first; a newline in it signals template noise, so
    # fall back to the ArticleTitle meta tag, then the list-page title.
    title = ''.join(res.xpath('//h1[@class="newstitle"]//text()|//h1[@class="gk_title"]//text()').extract()).strip()
    if '\n' in title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        raise Exception('cnbz article pub_date not found')

    pub_no = ''.join(res.xpath('//table[contains(@class,"hidden-sm")]//th[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[contains(@class,"hidden-sm")]//th[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[contains(@class,"hidden-sm")]//th[contains(text(),"所属分类")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[contains(@class,"hidden-sm")]//th[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[contains(@class,"hidden-sm")]//th[contains(text(),"所属机构")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # City-level organ names omit the city; prefix "巴中" to disambiguate.
        organ = '巴中' + organ

    fulltext_xpath = '//div[contains(@class,"j-fontContent")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('cnbz article fulltext not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99554'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CNBZ"
    zt_provider = "cnbzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省资阳市
def policy_ziyanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Ziyang (资阳) municipal policy pages.

    Handles two site layouts, selected by ``list_rawid``: the
    ``policy/?area=1...`` search listing and the regular column listings.
    On page 1 it schedules tasks for every remaining list page; in all
    cases it emits one article-stage task per list entry.

    :param callmodel: platform callback context carrying the fetched HTML
        and the originating SQL row.
    :return: DealModel with scheduled page tasks and next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count sits in the pager, e.g. "/<span class='pagered'>12".
        max_count = re.findall(r"/<span class='pagered'>(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'policy/?area=1&column=&topic=&feature=&page=' == callmodel.sql_model.list_rawid:
            # Search-style listing: first row is the header, skip it.
            li_list = res.xpath('//div[@class="right_box"]/div')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('div[1]//a/@href').extract_first()
                base_url = f'http://gk.ziyang.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                # rawid is the trailing "id=" query value.
                rawid = url.split('id=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99555'
                article_json["url"] = url
                article_json["title"] = li.xpath('div[1]//a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="column_list_box_full"]/ul/li|//div[@class="catalog_full"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                # The "_ziyang" columns live on the main portal, the rest on gk.
                if '_ziyang' in callmodel.sql_model.list_rawid:
                    base_url = f'http://www.ziyang.gov.cn/{callmodel.sql_model.list_rawid}'
                else:
                    base_url = f'http://gk.ziyang.gov.cn/{callmodel.sql_model.list_rawid}'
                url = parse.urljoin(base_url, href)
                rawid = url.split('id=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99555'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ziyangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Ziyang; parsing is deferred to the ETL
    step, so this returns an empty DealModel unchanged."""
    return DealModel()


def policy_ziyangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Ziyang (资阳) policy article pages.

    Extracts metadata and full text from the fetched HTML, stages rows for
    ``policy_latest`` / ``policy_fulltext_latest``, and records attachment
    info (``other_dicts``) back onto the originating SQL row.

    :param callmodel: platform callback context with the article HTML and
        the list-stage ``article_json`` payload.
    :raises Exception: when no pub_date can be determined or the full-text
        container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    title = ''.join(res.xpath('//span[@id="lbl_title"]//text()|//span[@id="lbl_headline"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    if not pub_date:
        # Fall back to the date field shown in the info box.
        pub_date_info = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"成") and contains(text(),"期：")]/following::td[1]/span/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        raise Exception('ziyang article pub_date not found')
    pub_no = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/span/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/span/text()').extract()).strip()
    # NOTE(review): same xpath as the pub_date fallback above — confirm
    # whether this site exposes a publish date distinct from 成文日期.
    written_date = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"成") and contains(text(),"期：")]/following::td[1]/span/text()').extract()).strip()
    legal_status = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"有") and contains(text(),"性：")]/following::td[1]/span/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="info_box"]//span[contains(text(),"发") and contains(text(),"构：")]/following::td[1]/span/text()').extract()).strip()
    if organ.startswith('市'):
        # City-level organ names omit the city; prefix "资阳" to disambiguate.
        organ = '资阳' + organ

    fulltext_xpath = '//span[@id="lbl_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('ziyang article fulltext not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99555'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZIYANG"
    zt_provider = "ziyanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省阿坝藏族羌族自治州
def policy_abazhoulist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Aba prefecture (阿坝州) policy pages.

    On page 1 it schedules tasks for every remaining list page (appending
    the page number to ``page_info``); in all cases it emits one
    article-stage task per list entry.

    :param callmodel: platform callback context carrying the fetched HTML
        and the originating SQL row.
    :return: DealModel with scheduled page tasks and next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the createPageHTML(...) pager call.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Encode the target page into page_info for the fetcher.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="nav_list_list_container"]/ul/li|//div[@class="list_container"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # urljoin raises on a missing href; skip anchor-less entries.
                continue
            base_url = f'http://www.abazhou.gov.cn/{callmodel.sql_model.list_rawid}/nav_list.shtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99556'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/p/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_abazhouarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Aba prefecture; parsing is deferred to
    the ETL step, so this returns an empty DealModel unchanged."""
    return DealModel()


def policy_abazhouarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Aba prefecture (阿坝州) policy article pages.

    Only title, dates and full text are extracted; this site apparently
    exposes no structured metadata block (pub_no / index_no / organ etc.)
    — confirm against live pages before adding those fields.

    :param callmodel: platform callback context with the article HTML and
        the list-stage ``article_json`` payload.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page headline; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="detail"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="common_detail"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception('abazhou article fulltext not found')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99556'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ABAZHOU"
    zt_provider = "abazhougovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   四川省甘孜藏族自治州
def policy_gzzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Garze prefecture (甘孜州) policy pages.

    On page 1 it schedules tasks for every remaining list page (appending
    the page number to ``page_info``); in all cases it emits one
    article-stage task per list entry.

    :param callmodel: platform callback context carrying the fetched HTML
        and the originating SQL row.
    :return: DealModel with scheduled page tasks and next-stage article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from the createPageHTML(...) pager call.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Encode the target page into page_info for the fetcher.
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list_container"]/ul/li|//div[@class="gknr_list"]/dl/dd')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                # Skip anchor-less entries ('http' in None would raise).
                continue
            base_url = 'http://www.gzz.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid is the article filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99557'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gzzlist1_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Ganzi prefecture JSON article API (www.gzz.gov.cn).

    On page 1 it reads the total page count from the API response and seeds
    one pending list task per page (insert-ignore dedups re-inserts); for
    every page it emits one article task per record.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The fetched body is JSON, not HTML, despite the 'html' key.
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['data']['pages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The API is paged via the request payload, so list_json is
                # reused unchanged for every page.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for record in html_json['data']['records']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = record['manuscriptId']
            url = f'http://www.gzz.gov.cn/{record["folder"]}/article/{href}'
            # The manuscript id doubles as the stable raw id.
            temp["rawid"] = href
            temp["sub_db_id"] = '99564'
            article_json["url"] = url
            article_json["title"] = record['title']
            article_json["pub_date"] = record['time']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gzzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gzz.gov.cn; parsing happens in the ETL
    step, so this stage only acknowledges the task with an empty DealModel."""
    return DealModel()


def policy_gzzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gzz.gov.cn article pages.

    Extracts the title and fulltext from the saved HTML, merges them with
    the metadata captured at list time (url, pub_date), and emits rows for
    ``policy_latest`` and ``policy_fulltext_latest`` plus an ``other_dicts``
    update carrying any attachment info.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="txtcen"]//text()|//div[@class="gknbxq_top"]//h2//text()|//div[@class="article-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="gknbxq_detail"]|//div[@id="NewsContent"]|//div[@id="contentBox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried / the xpath revised.
        raise Exception(f'fulltext not found with xpath {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # NOTE(review): policy_gzzlist1_callback tags rows with sub_db_id '99564'
    # while this ETL hard-codes '99557' — confirm the intended id per list.
    sub_db_id = '99557'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GZZ"
    zt_provider = "gzzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}") back onto the source article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Liangshan Yi Autonomous Prefecture, Sichuan (四川省凉山彝族自治州)
def policy_lszlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Liangshan prefecture (www.lsz.gov.cn).

    Page 0 parses the total page count from the site's ``createPageHTML``
    pager call and seeds one task per remaining page; every page then emits
    one article task per list entry.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # createPageHTML(<total>, ...) is emitted by the site's pager script.
        max_count = re.findall(r"createPageHTML\((\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # Pages are 0-based (index.html, index_1.html, ...), so the last
            # page number is total_page - 1: range(1, total_page) is correct.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list"]/li|//ul[@class="gl-list"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'http://www.lsz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # rawid is the article's filename without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99558'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lszarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for www.lsz.gov.cn; parsing is deferred to the
    ETL step, so this stage returns an empty DealModel."""
    return DealModel()


def policy_lszarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.lsz.gov.cn article pages.

    Parses the title, document metadata (issue number, index number, subject,
    written date, validity, issuing organ) and the fulltext from the saved
    HTML, emits rows for ``policy_latest`` / ``policy_fulltext_latest``, and
    writes attachment info back to the article row.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the visible page title, then the ArticleTitle meta tag,
    # then the title captured at list time.
    title = ''.join(res.xpath('//p[@class="xl-p1"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"][1]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata values may carry a leading fullwidth colon; strip the first one.
    pub_no = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"发文字号")]/parent::li[1]/a/text()').extract()).replace('：', '', 1).strip()
    index_no = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"索引号")]/parent::li[1]/text()').extract()).replace('：', '', 1).strip()
    subject = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"主题分类")]/parent::li[1]/text()').extract()).replace('：', '', 1).strip()
    written_date = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"成文日期")]/parent::li[1]/text()').extract()).replace('：', '', 1).strip()
    legal_status = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"有效性")]/parent::li[1]/text()').extract()).replace('：', '', 1).strip()
    organ = ''.join(res.xpath('//div[@class="zcbt"]//p[contains(text(),"发文机关")]/parent::li[1]/text()').extract()).replace('：', '', 1).strip()
    if organ.startswith('州'):
        # The page abbreviates the prefecture name; restore the full prefix.
        organ = '凉山彝族自治' + organ

    fulltext_xpath = '//div[@class="xl-box"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried / the xpath revised.
        raise Exception(f'fulltext not found with xpath {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99558'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LSZ"
    zt_provider = "lszgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}") back onto the source article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Development and Reform Commission (云南省发展和改革委员会)
def policy_yndrcynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yunnan Development and Reform Commission
    (yndrc.yn.gov.cn).

    Page 1 reads the total page count from the "当前<strong>…</strong>/N"
    pager text and seeds the remaining pages; every page emits one article
    task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"当前<strong>.*?</strong>/(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paging is encoded in the request, so list_json is reused.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="commonWrap"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('h2/a/@href').extract_first()
            base_url = 'http://yndrc.yn.gov.cn'
            # Hrefs may be absolute or site-relative.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # rawid is the last URL path segment (filename incl. extension).
            rawid = url.split('/')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99559'
            article_json["url"] = url
            article_json["title"] = li.xpath('h2/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yndrcynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for yndrc.yn.gov.cn; parsing is deferred to the
    ETL step, so this stage returns an empty DealModel."""
    return DealModel()


def policy_yndrcynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for yndrc.yn.gov.cn article pages.

    Extracts the title and fulltext from the saved HTML, merges them with
    list-stage metadata (url, pub_date), and emits rows for ``policy_latest``
    and ``policy_fulltext_latest`` plus an attachment-info update.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="article"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried / the xpath revised.
        raise Exception(f'fulltext not found with xpath {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99559'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YNDRCYN"
    zt_provider = "yndrcyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}") back onto the source article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Department of Industry and Information Technology (云南省工业和信息化厅)
def policy_gxtynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yunnan Department of Industry and
    Information Technology (gxt.yn.gov.cn).

    Page 1 reads the total page count from the "/共N页" pager text and seeds
    the remaining pages; every page emits one article task per list row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"/共(\d+)页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Paging is encoded in the request, so list_json is reused.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="newsList"]/li|//ul[@class="regulationsList"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('div/a/@href|a/@href').extract_first()
            base_url = f'http://gxt.yn.gov.cn/{callmodel.sql_model.list_rawid}_1'
            url = parse.urljoin(base_url, href)
            # rawid is the last URL path segment (filename incl. extension).
            rawid = url.split('/')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99560'
            article_json["url"] = url
            article_json["title"] = li.xpath('div/a/text()|a/text()').extract_first().strip()
            # The administrative-normative-documents list shows no date column.
            if 'zwgk/fdzdgknr/flfgjgfx2ynsgxw/xzgfxwj' == callmodel.sql_model.list_rawid:
                article_json["pub_date"] = ''
            else:
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_gxtynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for gxt.yn.gov.cn; parsing is deferred to the
    ETL step, so this stage returns an empty DealModel."""
    return DealModel()


def policy_gxtynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for gxt.yn.gov.cn article pages.

    Parses the title, the ``govDetailTable`` metadata (issue number, index
    number, subject, effective/repeal dates, issuing organ) and the fulltext,
    then emits rows for ``policy_latest`` / ``policy_fulltext_latest`` and an
    attachment-info update.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//h2[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"文 号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"索引号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"主题分类：")]/following::td[1]/text()').extract()).strip()
    impl_date = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"生效日期：")]/following::td[1]/text()').extract()).strip()
    invalid_date = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"废止日期：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="govDetailTable"]//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('省'):
        # The page abbreviates the province name; restore the full prefix.
        organ = '云南' + organ

    fulltext_xpath = '//div[@class="article"]|//div[@class="conTxt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly so the task can be retried / the xpath revised.
        raise Exception(f'fulltext not found with xpath {fulltext_xpath}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99560'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "GXTYN"
    zt_provider = "gxtyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)
    data['invalid_date'] = clean_pubdate(invalid_date)
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Write attachment info (or "{}") back onto the source article row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Department of Science and Technology (云南省科学技术厅)
def policy_kjtynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for the Yunnan Department of Science and Technology
    (kjt.yn.gov.cn).

    The page embeds a URL-quoted, base64-encoded pager snippet inside the
    ``text-c`` element; the total item count is read from it and converted to
    a page count (15 items/page for the regulatory-document list, 20
    otherwise). Three list layouts share identical per-row handling, which is
    factored into a local helper.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Decode the obfuscated pager info to recover the total item count.
        bs64_str = re.findall('text-c">(.*?)<', para_dicts["data"]["1_1"]['html'])[0]
        info_str = urllib.parse.unquote(str(base64.b64decode(bs64_str.encode('utf-8')), 'utf-8'))
        max_count = re.findall(r">(\d+)条<", info_str)
        max_count = int(max_count[0]) if max_count else 1
        if 'xingzhengguifanxingwenjian' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 15)
        else:
            total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        def _append_rows(rows, href_xp, title_xp, date_xp):
            """Emit one article task per list row; ``date_xp=None`` means the
            layout shows no publish date."""
            for row in rows:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                href = row.xpath(href_xp).extract_first()
                # Hrefs may be absolute or site-relative.
                url = href if 'http' in href else 'http://kjt.yn.gov.cn' + href
                article_json = dict()
                # rawid is the article's filename without its extension.
                temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["sub_db_id"] = '99561'
                article_json["url"] = url
                article_json["title"] = row.xpath(title_xp).extract_first().strip()
                article_json["pub_date"] = row.xpath(date_xp).extract_first().strip() if date_xp else ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        if 'xingzhengguifanxingwenjian' in callmodel.sql_model.list_rawid:
            # Table layout; skip the header row.
            _append_rows(res.xpath('//table[@class="tb1"]/tbody/tr')[1:],
                         'td[2]/span/a/@href', 'td[2]/span/a/text()', None)
        elif 'tongzhigonggao' in callmodel.sql_model.list_rawid:
            _append_rows(res.xpath('//div[@class="list-text"]/ul/li'),
                         'a/@href', 'a/text()', 'span/text()')
        else:
            _append_rows(res.xpath('//div[@class="scroll_wrap"]/ul/li'),
                         'span[1]/a/@href', 'span[1]/a/text()', 'span[2]/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kjtynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for kjt.yn.gov.cn; parsing is deferred to the
    ETL step, so this stage returns an empty DealModel."""
    return DealModel()


def policy_kjtynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan Science & Technology Dept. (kjt.yn.gov.cn) articles.

    Parses the fetched article HTML, builds the metadata row for
    ``policy_latest`` and the fulltext row for ``policy_fulltext_latest``,
    and records attachment info found inside the fulltext container on
    the originating task row.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page itself; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="gfxwj-title"]//text()|//div[@class="contt_tit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="gfxwj-content"]|//div[@class="contt_cont"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Fail loudly with context so the task can be retried / inspected.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99561'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KJTYN"
    zt_provider = "kjtyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links found inside the fulltext container are stored in the
    # task row's other_dicts column ("{}" when there are none).
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Department of Education (jyt.yn.gov.cn)
def policy_jytynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: schedule remaining list pages and article tasks.

    On page 1 the total page count is read from the pager text and one
    list task is inserted per remaining page; every visible list item is
    turned into an article task carrying url / title / pub_date metadata.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like "当前 x / N 页"; default to one page when absent.
        max_count = re.findall(r"当前.*?/ (\d+) 页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="yemian"]/ul/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://jyt.yn.gov.cn/article/category/{callmodel.sql_model.list_rawid}'
            url = parse.urljoin(base_url, href)
            if 'article' not in url:
                continue
            # The last path segment of the detail URL is the article id.
            rawid = url.split('/')[-1]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99562'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def policy_jytynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for the Yunnan Education Dept. site.

    Nothing extra to schedule here; an empty DealModel is returned.
    """
    return DealModel()


def policy_jytynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan Education Dept. (jyt.yn.gov.cn) articles.

    Extracts title, publish date (trying several page layouts in turn),
    document number, index number and issuing organ, builds the metadata
    and fulltext rows, and records attachments (from the fulltext body
    and from the "fill-list" container) on the task row.

    Raises:
        Exception: when no publish date or no fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    res = Selector(text=html)

    # Prefer the title from the page itself; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="post passage-contents"]//h1/text()|//h3[@class="h3class font34"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # The site uses several detail-page layouts; try each date location in
    # turn and keep the first one that cleans to a non-empty value.
    pub_date = ''
    for date_xpath in (
        '//div[@class="referencebox"]//dt[contains(text(),"公开日期")]/following::dd[1]/text()',
        '//div[@class="detail_about metas"]/span[1]/text()',
        '//div[@class="datetime"]/text()',
        '//div[@class="rq"]/span[1]/text()',
    ):
        pub_date = clean_pubdate(''.join(res.xpath(date_xpath).extract()).strip())
        if pub_date:
            break
    if not pub_date:
        raise Exception(f'pub_date not found: {provider_url}')
    pub_year = pub_date[:4]

    pub_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"文号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"索引号")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"来源")]/following::dd[1]/text()').extract()).strip()
    # Qualify bare "省..." organ names with the province.
    if organ.startswith('省'):
        organ = '云南' + organ

    fulltext_xpath = '//div[@class="arti"]|//div[@id="fontSize"]|//div[@class="mart"]|//div[contains(@class,"content-body")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99562'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "JYTYN"
    zt_provider = "jytyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments can live inside the fulltext body or in a separate
    # "fill-list" container; collect both.
    file_info1 = get_file_info(data, res, f'({fulltext_xpath})')
    file_info2 = get_file_info(data, res, f'(//div[@class="fill-list"])')
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Department of Civil Affairs (ynmz.yn.gov.cn)
def policy_ynmzynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: schedule remaining list pages and article tasks.

    On the first page (page_index 0) the page count is read from the
    pager text and one list task is inserted per remaining page. List
    items are parsed from either a table layout (list_rawid containing
    '1636') or the regular <ul> layout, each becoming an article task.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"&nbsp;共(\d+)页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            # NOTE(review): paging here looks 0-based (starts at 0, stops
            # before total_page) unlike the 1-based sites — confirm against
            # the site's pager before changing.
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'http://ynmz.yn.gov.cn'
        if '1636' in callmodel.sql_model.list_rawid:
            # Table layout: skip the header row.
            li_list = res.xpath('//table[@class="tab1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                if not href:
                    continue
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                # Filename (without extension) serves as the raw id.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99563'
                article_json["url"] = url
                article_json["title"] = (li.xpath('td[2]/span/a/text()').extract_first() or '').strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@id="new_list"]/ul/li|//ul[@class="cim_list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                if not href:
                    continue
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                # Filename (without extension) serves as the raw id.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99563'
                article_json["url"] = url
                article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
                article_json["pub_date"] = (li.xpath('span[@class="time"]/text()|span[@class="time_content"]/text()').extract_first() or '').strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ynmzynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for the Yunnan Civil Affairs Dept. site.

    No follow-up work is scheduled here; an empty DealModel is returned.
    """
    return DealModel()


def policy_ynmzynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan Civil Affairs Dept. (ynmz.yn.gov.cn) articles.

    Extracts title, document number, index number and issuing organ from
    the detail page, builds the metadata and fulltext rows, and records
    attachments found inside the fulltext container on the task row.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page itself; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="content_item_top"]//text()|//h3[@class="title"]//text()|//div[@class="zcxw-content-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # NOTE(review): "字号" usually means font size, not document number
    # ("文号") — confirm this xpath against a live detail page.
    pub_no = ''.join(res.xpath('//div[@class="content_item"]/div[1]//div[contains(text(),"字号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="content_item"]/div[1]//div[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="content_item"]/div[2]//div[contains(text(),"文章来源")]/following::div[1]/text()').extract()).strip()
    # Qualify bare "省..." organ names with the province.
    if organ.startswith('省'):
        organ = '云南' + organ

    fulltext_xpath = '//div[@id="zoom"]|//div[@class="articlecontent"]|//div[@class="zcxw-content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99563'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YNMZYN"
    zt_provider = "ynmzyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links from the fulltext container go on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Department of Finance (czt.yn.gov.cn)
def policy_cztynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List callback for the Finance Dept.'s JSON list API.

    The list response is JSON (not HTML): total pages come from
    ``result.pages`` and the items from ``result.records``. On page 1,
    one list task per page is inserted; each record becomes an article
    task carrying both the display URL and the detail-JSON endpoint.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html_json = json.loads(para_dicts["data"]["1_1"]['html'])
        total_page = html_json['result']['pages']
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): the range starts at 1 so page 1 is re-inserted;
            # the INSERT IGNORE prefix is presumably what keeps that
            # harmless — confirm before changing the start index.
            for page in range(1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it

        for record in html_json['result']['records']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = record['id']
            # Both article types share the same detail-JSON endpoint; only
            # the human-readable page URL differs.
            info_url = f'http://czt.yn.gov.cn/jeecg-boot/index/tbArticle/queryAllById?id={href}'
            if 'listRegulatory' in callmodel.sql_model.list_rawid:
                url = f'http://czt.yn.gov.cn/gfxwj-desc.html?id={href}'
            else:
                url = f'http://czt.yn.gov.cn/news_des.html?id={href}'
            temp["rawid"] = href
            temp["sub_db_id"] = '99564'
            article_json["url"] = url
            article_json["info_url"] = info_url
            article_json["title"] = record['title']
            article_json["pub_date"] = record['createTime']
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cztynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for the Yunnan Finance Dept. site.

    No follow-up work is scheduled here; an empty DealModel is returned.
    """
    return DealModel()


def policy_cztynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan Finance Dept. (czt.yn.gov.cn) articles.

    The detail response is JSON: the article body HTML sits under
    ``result.parent.content`` and attachments under ``result.child``.
    Builds the metadata and fulltext rows and records attachments
    (links inside the body plus the ``child`` enclosure list).
    """
    result = EtlDealModel()
    save_data = list()

    html_json = json.loads(callmodel.para_dicts['data']['1_1']['html'])
    html = html_json['result']['parent']['content']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = article_json['pub_date']
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the API payload; fall back to the list-page title.
    title = html_json['result']['parent']['title']
    if not title:
        title = article_json['title'].strip()

    organ = html_json['result']['parent']['msgauthor']
    # Qualify bare "省..." organ names with the province.
    if organ.startswith('省'):
        organ = '云南' + organ

    # The API already returns the body HTML, so it is the fulltext as-is.
    fulltext = html

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99564'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CZTYN"
    zt_provider = "cztyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = clean_pubdate(pub_date)
    data['pub_year'] = pub_year
    data['organ'] = organ

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachments: links embedded in the body HTML plus the API's
    # "child" enclosure list.
    file_info1 = get_file_info(data, res, f'(//body)')
    file_info2 = list()
    enclosures = html_json['result']['child']
    if isinstance(enclosures, list):
        for enclosure in enclosures:
            purl = 'http://czt.yn.gov.cn/upload' + enclosure["enclosure"]
            dic = {'url': purl, 'name': enclosure["name"], 'pub_year': pub_year, 'keyid': lngid}
            file_info2.append(dic)
    file_info = file_info1 + file_info2
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   Yunnan Provincial Dept. of Human Resources and Social Security (hrss.yn.gov.cn)
def policy_hrssynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback: schedule remaining list pages and article tasks.

    On page 1 the total page count is read from the pager text and one
    list task is inserted per remaining page; every list item becomes an
    article task carrying url / title / pub_date metadata.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"共 (\d+) 页", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="ul13 pd20"]/li|//div[@class="info-list"]/p')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            if not href:
                continue
            base_url = f'https://hrss.yn.gov.cn'
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            # Query-string URLs carry the id after '?'; otherwise the
            # filename without its extension is the raw id.
            if '?' in url:
                rawid = url.split('?')[-1]
            else:
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99565'
            article_json["url"] = url
            article_json["title"] = (li.xpath('a/text()').extract_first() or '').strip()
            article_json["pub_date"] = (li.xpath('span/text()').extract_first() or '').strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hrssynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-fetch callback for the Yunnan HR & Social Security Dept. site.

    No follow-up work is scheduled here; an empty DealModel is returned.
    """
    return DealModel()


def policy_hrssynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yunnan HR & Social Security Dept. (hrss.yn.gov.cn) articles.

    Extracts the title from the detail page, builds the metadata and
    fulltext rows, and records attachments found inside the fulltext
    container on the task row.

    Raises:
        Exception: when the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title from the page itself; fall back to the list-page title.
    title = ''.join(res.xpath('//span[@id="Body_lb_newsTitle"]//text()|//div[@id="print"]//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@id="content"]|//span[@id="Body_ltl_newsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99565'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HRSSYN"
    zt_provider = "hrssyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Attachment links from the fulltext container go on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省农业农村厅
def policy_nyncynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 云南省农业农村厅 (Yunnan Dept. of Agriculture and Rural Affairs).

    Decodes the base64/url-encoded pager blob to learn the total record
    count, fans out tasks for the remaining list pages (only when handling
    page 1), and extracts each article row on the current page into a
    next-stage task.

    Args:
        callmodel: Platform callback model carrying the fetched HTML and
            the originating SQL row.

    Returns:
        DealModel with page-fanout inserts in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The record count is hidden in a base64 + urlencoded blob inside
        # the pager element ("...N条...").
        bs64_str = re.findall('text-c">(.*?)<', para_dicts["data"]["1_1"]['html'])[0]
        info_str = urllib.parse.unquote(str(base64.b64decode(bs64_str.encode('utf-8')), 'utf-8'))
        max_count = re.findall(r">(\d+)条<", info_str)
        max_count = int(max_count[0]) if max_count else 1
        # Page size differs per site column.
        if 'guifanxingwenjian-new' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 15)
        elif 'zuixinwenjian' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 10)
        else:
            total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://nync.yn.gov.cn'

        def _append_item(li, href_xp, title_xp, pub_date_xp):
            """Build one next-stage task from a list row.

            pub_date_xp may be None for columns that show no date.
            """
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath(href_xp).extract_first()
            url = href if 'http' in href else base_url + href
            # rawid = file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99566'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(pub_date_xp).extract_first().strip() if pub_date_xp else '',
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        # Each column uses a different list markup; selectors differ only.
        if 'zuixinwenjian' in callmodel.sql_model.list_rawid:
            for li in res.xpath('//table[@class="table"]/tbody/tr'):
                _append_item(li, 'td[2]/a/@href', 'td[2]/a/text()', 'td[3]/text()')
        elif 'guifanxingwenjian-new' in callmodel.sql_model.list_rawid:
            for li in res.xpath('//div[@class="items_list"]/ul/li'):
                _append_item(li, 'div/p/a/@href', 'div/p/a/text()', None)
        else:
            for li in res.xpath('//div[@class="scroll_wrap"]/ul/li'):
                _append_item(li, 'span[1]/a/@href', 'span[1]/a/text()', 'span[2]/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nyncynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 云南省农业农村厅; no extra work is needed here."""
    return DealModel()


def policy_nyncynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 云南省农业农村厅 article detail pages.

    Extracts title / pub_no / index_no / impl_date and the full text from
    the detail HTML, stages rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info in the source
    row's ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="xqym-title yh"]//text()|//p[@class="gfxwj-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"文") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"索") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    # BUGFIX: the second condition previously repeated "实"; "实"+"施" matches the
    # 实施日期 (implementation-date) label, mirroring the 文/号 and 索/号 pairs above.
    impl_date = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"实") and contains(text(),"施")]/following::dd[1]/text()').extract()).strip()

    fulltext_xpath = '//div[@class="mart"]|//div[@class="gfxwj-content-wrapper"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99566'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NYNCYN"
    zt_provider = "nyncyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, kept from original behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['index_no'] = index_no
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省住房和城乡建设厅
def policy_zfcxjstynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 云南省住房和城乡建设厅 (Yunnan Dept. of Housing and Urban-Rural Development).

    Reads the total page count printed in the HTML ("总页数N"), fans out
    tasks for the remaining list pages (only from page 1), then extracts
    article rows on the current page into next-stage tasks.

    Returns:
        DealModel with page-fanout inserts in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # This site prints the page count directly, no decoding needed.
        matches = re.findall(r"总页数(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(matches[0]) if matches else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"list-{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://zfcxjst.yn.gov.cn'

        def _append_item(li, href_xp, title_xp, pub_date_xp):
            """Build one next-stage task from a list row.

            pub_date_xp may be None for columns that show no date.
            """
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath(href_xp).extract_first()
            url = href if 'http' in href else base_url + href
            # rawid = file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99567'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(pub_date_xp).extract_first().strip() if pub_date_xp else '',
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        # The two site columns use different list markup.
        if 'xingzhengguifanxingwenjian8777' in callmodel.sql_model.list_rawid:
            for li in res.xpath('//div[@class="zxxbtnewlist"]/ul/li'):
                _append_item(li, 'span/a/@href', 'span/a/text()', None)
        else:
            for li in res.xpath('//ul[@class="new"]/li|//ul[@class="zfxxgnewlist"]/li'):
                _append_item(li, 'a/@href', 'a/text()', 'span/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_zfcxjstynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 云南省住房和城乡建设厅; no extra work is needed here."""
    return DealModel()


def policy_zfcxjstynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 云南省住房和城乡建设厅 article detail pages.

    Extracts the title and full text from the detail HTML, stages rows
    for ``policy_latest`` and ``policy_fulltext_latest``, and records
    attachment info in the source row's ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="wzbt"]//text()|//div[@class="ynnnsstit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="contentN"]|//div[@class="zfxxxnewcontentN"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99567'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZFCXJSTYN"
    zt_provider = "zfcxjstyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, kept from original behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省卫生健康委员会
def policy_ynswsjkwynlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 云南省卫生健康委员会 (Yunnan Health Commission).

    Decodes the base64/url-encoded pager blob to learn the total record
    count, fans out tasks for the remaining list pages (only from page 1),
    then extracts each article row on the current page into a next-stage
    task. Three different list layouts are handled by one combined XPath.

    Returns:
        DealModel with page-fanout inserts in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The record count is hidden in a base64 + urlencoded pager blob.
        bs64_str = re.findall('text-c">(.*?)<', para_dicts["data"]["1_1"]['html'])[0]
        info_str = urllib.parse.unquote(str(base64.b64decode(bs64_str.encode('utf-8')), 'utf-8'))
        max_count = re.findall(r">(\d+)条<", info_str)
        max_count = int(max_count[0]) if max_count else 1
        # Page size differs per site column.
        if 'html/gongshigonggao' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 25)
        elif 'zhengcewenjian/guifanxingwenjiano' in callmodel.sql_model.list_rawid:
            total_page = math.ceil(max_count / 15)
        else:
            total_page = math.ceil(max_count / 20)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//ul[@class="list-ul"]/li|//div[@class="scroll_wrap"]/ul/li|//table[@class="tb1"]/tbody/tr')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            article_json = dict()
            href = li.xpath('td[@class="tcc"]/span/a/@href|span[1]/a/@href').extract_first()
            if not href:
                # Some rows (e.g. table headers) carry no link.
                continue
            base_url = f'http://ynswsjkw.yn.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
            url = parse.urljoin(base_url, href)
            # rawid = file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99568'
            article_json["url"] = url
            article_json["title"] = li.xpath('td[@class="tcc"]/span/a/text()|span[1]/a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('td[@class="tcc"]/i/i/text()|span[2]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ynswsjkwynarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 云南省卫生健康委员会; no extra work is needed here."""
    return DealModel()


def policy_ynswsjkwynarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 云南省卫生健康委员会 article detail pages.

    Extracts title / pub_no / index_no / written_date / organ and the
    full text from the detail HTML, stages rows for ``policy_latest``
    and ``policy_fulltext_latest``, and records attachment info in the
    source row's ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//p[@class="biaoti_big"]//text()|//div[@class="xqym-title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"文") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"索") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"成文日期")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"referencebox")]//dt[contains(text(),"发布机构")]/following::dd[1]/text()').extract()).strip()
    # Pages abbreviate the issuing organ to "省..."; restore the province prefix.
    if organ.startswith('省'):
        organ = '云南' + organ

    fulltext_xpath = '//div[@id="content"]|//div[@class="mart"]|//div[@class="gfxwj-content"]|//div[contains(@class,"scroll_cont")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99568'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YNSWSJKWYN"
    zt_provider = "ynswsjkwyngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, kept from original behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省昆明市
def policy_kmlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for 昆明市 (Kunming municipal government).

    Reads the page count from the second ``PageIndex>`` occurrence in the
    HTML, fans out tasks for the remaining list pages (only from page 1),
    then extracts article rows on the current page into next-stage tasks.

    Returns:
        DealModel with page-fanout inserts in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        matches = re.findall(r'PageIndex>(\d+)', para_dicts["data"]["1_1"]['html'])
        # BUGFIX: the original indexed matches[1] whenever *any* match existed,
        # raising IndexError when exactly one occurrence was present.
        if len(matches) > 1:
            total_page = int(matches[1])
        elif matches:
            total_page = int(matches[0])
        else:
            total_page = 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 schedules the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)  # parsed for validation; value unused
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"index_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = f'https://www.km.gov.cn/{callmodel.sql_model.list_rawid}/index.html'

        def _append_item(li, href_xp, title_xp, pub_date_xp):
            """Build one next-stage task from a list row.

            Rows resolving to a non-.htm URL are skipped; pub_date_xp may
            be None for columns that show no date.
            """
            temp = info_dicts.copy()
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath(href_xp).extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                return
            # rawid = file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99569'
            article_json = {
                "url": url,
                "title": li.xpath(title_xp).extract_first().strip(),
                "pub_date": li.xpath(pub_date_xp).extract_first().strip() if pub_date_xp else '',
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        # The two site columns use different list markup.
        if 'zfxxgk/zcwj/xzgfxwj' in callmodel.sql_model.list_rawid:
            for li in res.xpath('//div[@class="main_zfxxgk_gzk_list"]/ul/li'):
                _append_item(li, 'div[2]/a/@href', 'div[2]/a/text()', None)
        else:
            # First row of the data table is a header; skip it.
            for li in res.xpath('//div[@class="data-table"]/div')[1:]:
                _append_item(li,
                             'div/p[@class="w659"]/a[1]/@href|div/p[@class="w571"]/a/@href',
                             'div/p[@class="w659"]/a[1]/text()|div/p[@class="w571"]/a/text()',
                             'div/p[@class="w80"]/a/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_kmarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for 昆明市; no extra work is needed here."""
    return DealModel()


def policy_kmarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for 昆明市 article detail pages.

    Extracts title / pub_no / index_no / organ and the full text from the
    detail HTML, stages rows for ``policy_latest`` and
    ``policy_fulltext_latest``, and records attachment info in the source
    row's ``other_dicts``.

    Raises:
        Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="title pr"]//text()|//h3[@class="fz30"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[contains(@class,"topInfo")]//span[contains(text(),"文号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[contains(@class,"topInfo")]//span[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[contains(@class,"topInfo")]//span[contains(text(),"来源")]/following::div[1]/text()').extract()).strip()
    # Pages abbreviate the issuing organ to "市..."; restore the city prefix.
    if organ.startswith('市'):
        organ = '昆明' + organ

    fulltext_xpath = '//div[@id="detail_con"]|//div[@class="gzkMainCon pr"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception("fulltext container not found")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99569'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "KM"
    zt_provider = "kmgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)  # progress trace, kept from original behavior

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省曲靖市
def policy_qjlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Qujing (曲靖, Yunnan) government policy pages.

    Reads the fetched list HTML, computes the total page count (15 items per
    page), schedules the remaining list pages when processing page 1, and
    emits one article-stage task per list entry.

    Args:
        callmodel: Platform callback context carrying the fetched HTML
            (``para_dicts``), the originating DB row (``sql_model``) and
            task routing info (``redis_all``).

    Returns:
        DealModel with the follow-up list pages in ``befor_dicts`` and the
        per-article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total item count from the pager text; raw string avoids the
        # invalid-escape warning on \d.  Default to a single page.
        max_count = re.findall(r'>(\d+)条<', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = math.ceil(max_count / 15)
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])

        def _emit_items(li_list, href_xpath, title_xpath, date_xpath):
            # Shared per-entry logic for all three list layouts: build one
            # article task per <li>.  date_xpath=None → no date on the list.
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath(href_xpath).extract_first()
                base_url = 'https://www.qj.gov.cn'
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                # rawid = final path component without its file extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99570'
                article_json["url"] = url
                article_json["title"] = li.xpath(title_xpath).extract_first().strip()
                if date_xpath:
                    article_json["pub_date"] = li.xpath(date_xpath).extract_first().strip()
                else:
                    article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)

        if 'zfwj/gfxwj' in callmodel.sql_model.list_rawid:
            # Normative documents section: no publication date on the list.
            _emit_items(res.xpath('//div[@class="zfxxgk_zd_head"]/ul/li'),
                        'div[@class="bt"]/div/a/@href',
                        'div[@class="bt"]/div/a/text()',
                        None)
        elif 'liqizhengce' in callmodel.sql_model.list_rawid:
            _emit_items(res.xpath('//div[@class="zfxxgk_zd_head"]/ul/li'),
                        'div[@class="bt"]/div/a/@href',
                        'div[@class="bt"]/div/a/text()',
                        'div[@class="xz"]/text()')
        else:
            _emit_items(res.xpath('//div[@class="scrollbar"]/ul/li|//div[@class="scroll_wrap"]//ul/li'),
                        'span[1]/a/@href',
                        'span[1]/a/text()',
                        'span[2]/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_qjarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Qujing pages; parsing is done in the ETL step."""
    return DealModel()


def policy_qjarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Qujing (曲靖, Yunnan) policy article pages.

    Parses the downloaded article HTML into a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title: prefer the page <h1>; an over-long h1 (>= 50 chars) means extra
    # page chrome got captured, so fall back to the metadata table, then to
    # the alternative layout, then to the list-page title.
    title = ''.join(res.xpath('//h1//text()').extract()).strip()
    if len(title) >= 50:
        title = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"标　　题")]/following::td[1]/text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//div[@class="scroll_wrap"]/h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata fields from the document-header table.
    pub_no = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"发文字号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"索 引 号：")]/following::td[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"主题分类")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"主 题 词")]/following::td[1]/text()').extract()).strip()
    written_date = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"成文日期")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zfxxgk_main_header_table"]//td[contains(text(),"发文机关")]/following::td[1]/text()').extract()).strip()
    if organ.startswith('市'):
        # Qualify bare "市…" organ names with the city name.
        organ = '曲靖' + organ

    fulltext_xpath = '//div[@class="scroll_cont ScrollStyle"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99570'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "QJ"
    zt_provider = "qjgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    # impl_date / invalid_date / legal_status are not published on these pages.

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or "{}") back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省玉溪市
def policy_yuxilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Yuxi (玉溪, Yunnan) government policy pages.

    Determines the total page count from one of several pager formats,
    schedules the remaining list pages when processing page 1, then emits
    one article-stage task per list entry.  The site has several list
    layouts, selected by substrings of ``list_rawid``.

    Args:
        callmodel: Platform callback context with the fetched HTML, the
            originating DB row and task routing info.

    Returns:
        DealModel with follow-up list pages and per-article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Try three pager formats in turn; raw strings avoid invalid-escape
        # warnings on \d and \(.
        max_count = re.findall(r';共 (\d+) 页', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'共&nbsp;(\d+)&nbsp;', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # The two site generations paginate differently.
                if 'yxs/' in callmodel.sql_model.list_rawid:
                    dic = {"page_info": f"index_{page}.html"}
                else:
                    dic = {"page_info": f"pageNo={page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'yxs/' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@id="searchList"]/a')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('@href').extract_first()
                base_url = 'http://www.yuxi.gov.cn'
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                if 'htm' not in url:
                    # Skip non-article links.
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99571'
                article_json["url"] = url
                article_json["title"] = li.xpath('text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif '1769' in callmodel.sql_model.list_rawid or '311' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="conRight"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[1]/a/@href').extract_first()
                base_url = 'http://www.yuxi.gov.cn'
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99571'
                article_json["url"] = url
                article_json["title"] = li.xpath('span[1]/a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif '=standard' in callmodel.sql_model.list_rawid:
            # Table layout; first row is the header.
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                base_url = 'http://www.yuxi.gov.cn'
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                # This layout identifies articles by a contentId query param.
                rawid = url.split('contentId=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99571'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                base_url = 'http://www.yuxi.gov.cn'
                if 'http' in href:
                    url = href
                else:
                    url = base_url + href
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99571'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_yuxiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Yuxi pages; parsing is done in the ETL step."""
    return DealModel()


def policy_yuxiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Yuxi (玉溪, Yunnan) policy article pages.

    Parses the downloaded article HTML into a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from the page itself, falling back to the list-page title.
    title = ''.join(res.xpath('//div[@class="ArticleTitle"]//text()|//h3//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # pub_date fallbacks: two on-page layouts carry the publication time.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//span[contains(text(),"发布时间：")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info.replace('发布时间：', ''))
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="ArticleAbout"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info.split('发布时间：')[-1])
        pub_year = pub_date[:4]

    # No document-header metadata (pub_no, index_no, organ, …) is published
    # on these pages, so only title/url/date are extracted.

    fulltext_xpath = '//div[@class="ArticleBody"]|//div[@id="content-txt-2"]|//div[@class="content-txt"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99571'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YUXI"
    zt_provider = "yuxigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or "{}") back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省昭通市
def policy_ztlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Zhaotong (昭通, Yunnan) government policy pages.

    Determines the total page count from the pager text (with hard-coded
    overrides for two sections), schedules the remaining list pages when
    processing page 1, then emits one article-stage task per list entry.

    Args:
        callmodel: Platform callback context with the fetched HTML, the
            originating DB row and task routing info.

    Returns:
        DealModel with follow-up list pages and per-article tasks.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Raw string avoids the invalid-escape warning on \d.
        max_count = re.findall(r'>共 (\d+) 页<', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        # Hard-coded totals for sections whose page count is not present in
        # the HTML — presumably measured by hand; TODO confirm still current.
        if 'lanmu/xwzx/15' in callmodel.sql_model.list_rawid:
            total_page = 165
        if 'lanmu/zwgk/3137' in callmodel.sql_model.list_rawid:
            total_page = 12
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Only page 1 fans out tasks for the remaining list pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'channels/4663' in callmodel.sql_model.list_rawid:
            # Table layout; first row is the header.
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                base_url = f'http://www.zt.gov.cn/{callmodel.sql_model.list_rawid}.html'
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99572'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'lanmu/xwzx/15' in callmodel.sql_model.list_rawid or 'lanmu/zwgk/3137' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="panel-body"]/div/div')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('h3/a/@href').extract_first()
                base_url = f'http://www.zt.gov.cn/{callmodel.sql_model.list_rawid}.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    # Skip non-article links.
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99572'
                article_json["url"] = url
                if not li.xpath('h3/a/text()|div/h3/a/text()').extract_first():
                    # Entry without a title text node — skip it.
                    continue
                article_json["title"] = li.xpath('h3/a/text()|div/h3/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('div[1]/span[2]/text()|div/div[1]/span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="table-wrap"]/table/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                base_url = f'http://www.zt.gov.cn/{callmodel.sql_model.list_rawid}.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99572'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ztarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article-stage callback for Zhaotong pages; parsing is done in the ETL step."""
    return DealModel()


def policy_ztarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Zhaotong (昭通, Yunnan) policy article pages.

    Parses the downloaded article HTML into a metadata row for
    ``policy_latest`` and a full-text row for ``policy_fulltext_latest``,
    and writes attachment info back onto the originating task row.

    Raises:
        Exception: when the full-text container is missing from the page.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title from the page itself, falling back to the list-page title.
    title = ''.join(res.xpath('//span[@class="t title"]//text()|//h1//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # No document-header metadata (pub_no, index_no, organ, …) is published
    # on these pages, so only title/url/date are extracted.

    fulltext_xpath = '//div[@id="artibody"]|//div[@class="scroll_cont ScrollStyle"]|//div[@class="mc ohd clearfix"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext container not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99572'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "ZT"
    zt_provider = "ztgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment metadata (or "{}") back onto the originating row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省保山市
def policy_baoshanlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Baoshan (Yunnan) government policy list page and queue tasks.

    On the first page, fans out list tasks for every remaining page (the
    site numbers pages down: ``/{total-1}.htm`` ... ``/1.htm``). Every list
    entry is queued as an article task with url/title/pub_date serialized
    into ``article_json``. Three page layouts are handled, selected by
    ``list_rawid``.

    :param callmodel: framework callback context carrying the fetched HTML
        in ``para_dicts["data"]["1_1"]`` and the originating task row in
        ``sql_model``.
    :return: ``DealModel`` with page fan-out in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count comes from the JS pager call; default to 1 page.
        max_count = re.findall(r'simple_list_gotopage_fun\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining list pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Crawl page N maps to /{total_page - N + 1}.htm on the site.
                dic = {"page_info": f"/{total_page - page + 1}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'index/tzgg' in callmodel.sql_model.list_rawid:
            # Layout 1: notice/announcement list (<ul class="list_news_s">).
            # base_url only depends on page_index, so compute it once.
            if page_index == 1:
                base_url = f'http://www.baoshan.gov.cn/index/tzgg.htm'
            else:
                base_url = f'http://www.baoshan.gov.cn/index/tzgg/1.htm'
            li_list = res.xpath('//ul[@class="list_news_s clearfloat"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the document file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99573'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zfxxgk/zfxxgkpt/zcwj/xzgfxwj' in callmodel.sql_model.list_rawid:
            # Layout 2: normative-document table (<tr class="tml">).
            base_url = f'http://www.baoshan.gov.cn/zfxxgk/zfxxgkpt/zcwj.htm'
            li_list = res.xpath('//tr[@class="tml"]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                if not href:
                    continue
                url = parse.urljoin(base_url, href)
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99573'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[2]/i[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 3 (default): scrolling list (<div class="scroll_wrap">).
            base_url = f'http://www.baoshan.gov.cn/{callmodel.sql_model.list_rawid}.htm'
            li_list = res.xpath('//div[@class="scroll_wrap"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99573'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_baoshanarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Baoshan; parsing happens in the ETL step."""
    return DealModel()


def policy_baoshanarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Baoshan policy article page into policy tables.

    Builds one row for ``policy_latest`` and one for
    ``policy_fulltext_latest``, and records attachment info (``file_info``)
    in an update back to the source task row.

    :param callmodel: callback context with the article HTML in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    # NOTE(review): `p[class="gzbtb"]` selects a child element literally named
    # "class" — probably meant `p[@class="gzbtb"]`; confirm against live pages.
    title = ''.join(res.xpath('//div[@class="tit"]//text()|//p[class="gzbtb"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//table[@class="zwb_zw_yybji"]//td[contains(text(),"文号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//table[@class="zwb_zw_yybji"]//td[contains(text(),"索引号")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//table[@class="zwb_zw_yybji"]//td[contains(text(),"主题词")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//table[@class="zwb_zw_yybji"]//td[contains(text(),"发布机构")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '保山' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99573'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "BAOSHAN"
    zt_provider = "baoshangovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachments found under the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省丽江市
def policy_lijianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Lijiang (Yunnan) government policy list page and queue tasks.

    Page count is taken from the ``createPageHTML('page-div', N`` pager
    call; subsequent list pages reuse ``list_json['page_info']`` with a
    ``_{page}`` suffix. Every list entry is queued as an article task.

    :param callmodel: framework callback context carrying the fetched HTML
        in ``para_dicts["data"]["1_1"]`` and the originating task row in
        ``sql_model``.
    :return: ``DealModel`` with page fan-out in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining list pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//dl[@class="thlist"]|//dl[@class="hdlist"]|//div[@class="gknr_list"]/dl/dd')
        base_url = f'http://www.lijiang.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('dt/a/@href|a/@href').extract_first()
            # Absolute links pass through; site-relative ones get the host.
            if 'http' in href:
                url = href
            else:
                url = base_url + href
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99574'
            article_json["url"] = url
            article_json["title"] = li.xpath('dt/a/text()|a/text()').extract_first().strip()
            article_json["pub_date"] = li.xpath('dd/text()|span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lijiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Lijiang; parsing happens in the ETL step."""
    return DealModel()


def policy_lijiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Lijiang policy article page into policy tables.

    Builds one row for ``policy_latest`` and one for
    ``policy_fulltext_latest``, and records attachment info (``file_info``)
    in an update back to the source task row.

    :param callmodel: callback context with the article HTML in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//h3[@class="h3class"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"文") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"索引号")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"来") and contains(text(),"源")]/following::dd[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '丽江' + organ

    fulltext_xpath = '//div[@class="arti"]|//div[@class="zfxxgk_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99574'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LIJIANG"
    zt_provider = "lijianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachments found under the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省普洱市
def policy_puershilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Pu'er (Yunnan) government policy list page and queue tasks.

    Page count is read from the ``list_gotopage_fun(N`` pager call, with a
    ``.../N&nbsp;`` text fallback. The site numbers pages down
    (``/{total-1}.htm`` ... ``/1.htm``). Every list entry is queued as an
    article task; the notice channel (``xwzx/tzgg``) uses a different
    title/date layout.

    :param callmodel: framework callback context carrying the fetched HTML
        in ``para_dicts["data"]["1_1"]`` and the originating task row in
        ``sql_model``.
    :return: ``DealModel`` with page fan-out in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'list_gotopage_fun\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall(r'&nbsp;.*?/(\d+)&nbsp;', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # Fan out the remaining list pages exactly once, from page 1.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Crawl page N maps to /{total_page - N + 1}.htm on the site.
                dic = {"page_info": f"/{total_page - page + 1}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="list"]/ul/li|//div[@class="gkpt_rig right"]/ul/li')
        # base_url only depends on page_index, so compute it once.
        if page_index == 1:
            base_url = f'http://www.puershi.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.puershi.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid is the document file name without its extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99575'
            article_json["url"] = url
            if 'xwzx/tzgg' in callmodel.sql_model.list_rawid:
                article_json["title"] = li.xpath('a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('a/span[2]/text()').extract_first().strip()
            else:
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('a/span/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_puershiarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Pu'er; parsing happens in the ETL step."""
    return DealModel()


def policy_puershiarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Pu'er policy article page into policy tables.

    Builds one row for ``policy_latest`` and one for
    ``policy_fulltext_latest``, and records attachment info (``file_info``)
    in an update back to the source task row.

    :param callmodel: callback context with the article HTML in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//div[@class="contit"]//h3/text()|//h2[@class="contit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[contains(text(),"索") and contains(text(),"号：")]/following::td[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//td[contains(text(),"主题词：")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构：")]/following::td[1]/text()').extract()).strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '普洱' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99575'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "PUERSHI"
    zt_provider = "puershigovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachments found under the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省临沧市
def policy_lincanglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """Parse a Lincang (Yunnan) government policy list page and queue tasks.

    Page count is read from the pager markup preceding the "next" button.
    The site numbers pages down (``/{total - page}.htm``). Every list entry
    is queued as an article task; three layouts are handled, selected by
    ``list_rawid``.

    :param callmodel: framework callback context carrying the fetched HTML
        in ``para_dicts["data"]["1_1"]`` and the originating task row in
        ``sql_model``.
    :return: ``DealModel`` with page fan-out in ``befor_dicts`` and
        article tasks in ``next_dicts``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        max_count = re.findall(r'(\d+)</a></span><span class="p_next p_fun', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        # NOTE(review): the fan-out triggers on page_index == 0 and the range
        # excludes total_page, unlike every sibling callback (== 1, inclusive
        # range); verify this is the intended 0-based numbering for Lincang.
        if page_index == 0:
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # base_url only depends on page_index, shared by all three layouts.
        if page_index == 1:
            base_url = f'http://www.lincang.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.lincang.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        if 'gfxwj' in callmodel.sql_model.list_rawid:
            # Layout 1: normative-document table; first row is the header.
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the document file name without its extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99576'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/@title').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zwgk' in callmodel.sql_model.list_rawid:
            # Layout 2: government-info list (<ul class="dtxx_list">).
            li_list = res.xpath('//ul[@class="dtxx_list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[1]/a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99576'
                article_json["url"] = url
                article_json["title"] = li.xpath('span[1]/a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('span[2]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Layout 3 (default): news-style lists.
            li_list = res.xpath('//div[contains(@class,"govnewslist")]/li|//span[@class="list_time"]/ancestor::ul[1]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('span[@class="tit_l"]/a/@href|a/@href').extract_first()
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99576'
                article_json["url"] = url
                article_json["title"] = li.xpath('span[@class="tit_l"]/a/text()|a/text()').extract_first().strip()
                if 'ztzl/lqyhmzczl/lcslqwj' in callmodel.sql_model.list_rawid:
                    pub_date = li.xpath('span/text()').extract_first().strip()
                else:
                    pub_date = li.xpath('span[@class="tit_r"]/text()|span[@class="list_time"]/text()').extract_first().strip()
                article_json["pub_date"] = pub_date
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_lincangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """No-op article stage for Lincang; parsing happens in the ETL step."""
    return DealModel()


def policy_lincangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL a Lincang policy article page into policy tables.

    Builds one row for ``policy_latest`` and one for
    ``policy_fulltext_latest``, and records attachment info (``file_info``)
    in an update back to the source task row.

    :param callmodel: callback context with the article HTML in
        ``para_dicts['data']['1_1']['html']`` and list-stage metadata in
        ``sql_model.article_json``.
    :raises Exception: when the full-text container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured at list time.
    title = ''.join(res.xpath('//h1[@class="xxgktit"]//text()|//h1[@id="jiuctit"]//text()|//span[@class="t"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Metadata cells hold "label：value" in one text node, so split on the
    # fullwidth colon after selecting the cell.
    pub_no = ''.join(res.xpath('//td[contains(text(),"文") and contains(text(),"号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1].strip()
    # NOTE(review): the xpath matches a halfwidth colon ("索引号:") but the
    # split uses the fullwidth one ('：') — if the page really uses ':',
    # index_no keeps the label; confirm against live pages.
    index_no = ''.join(res.xpath('//td[contains(text(),"索引号:")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1].strip()
    organ = ''.join(res.xpath('//td[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1].strip()
    # Qualify bare "市..." organ names with the city.
    if organ.startswith('市'):
        organ = '临沧' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f"fulltext not found: {provider_url}")

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99576'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "LINCANG"
    zt_provider = "lincanggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record any attachments found under the full-text container.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    if file_info:
        di_model_bef.update.update({"other_dicts": json.dumps(file_info, ensure_ascii=False)})
    else:
        di_model_bef.update.update({"other_dicts": "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省德宏傣族景颇族自治州
def policy_dhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dehong prefecture (https://www.dh.gov.cn).

    On the first page, fans out tasks for the remaining list pages; on every
    page, extracts article links from the list HTML and queues them under the
    next task tag for the article stage.

    :param callmodel: wrapper holding the downloaded page, sql row and redis info
    :return: DealModel carrying follow-up list tasks and queued article tasks
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like "第x/N页"; default to a single page when absent.
        max_count = re.findall(r'第.*?/(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # This site pages via URL, so the original list_json is reused as-is.
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        li_list = res.xpath('//div[@class="Bborder"]/ul/li|//div[@class="zfxx scorll"]/ul/li')
        base_url = 'https://www.dh.gov.cn'
        for li in li_list:
            temp = info_dicts.copy()
            # The queued article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            href = li.xpath('a/@href').extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                continue
            # rawid is the final path segment without its file extension.
            rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99577'
            article_json = {
                "url": url,
                "title": li.xpath('a/text()').extract_first().strip(),
                "pub_date": li.xpath('span[1]/text()').extract_first().strip(),
            }
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_dharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dehong; no extra scheduling, just an empty deal."""
    return DealModel()


def policy_dharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dehong prefecture article pages.

    Parses the downloaded article HTML, assembles metadata/fulltext rows for
    the `policy_latest` / `policy_fulltext_latest` tables, and writes any
    attachment info back onto the source row via a before-update.

    :param callmodel: wrapper holding the downloaded page and the sql row
    :raises Exception: when the fulltext container cannot be located
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="contitle"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    fulltext_xpath = '//div[@class="content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99577'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DH"
    zt_provider = "dhgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省怒江傈僳族自治州 (Nujiang Lisu Autonomous Prefecture, Yunnan)
def policy_nujianglist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Nujiang prefecture (https://www.nujiang.gov.cn).

    On page 1, fans out tasks for the remaining list pages; on every page,
    extracts article links (two page layouts) and queues them under the next
    task tag for the article stage.

    :param callmodel: wrapper holding the downloaded page, sql row and redis info
    :return: DealModel carrying follow-up list tasks and queued article tasks
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Pager text looks like ".../共N页"; default to a single page when absent.
        max_count = re.findall(r'/共(\d+)页', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'https://www.nujiang.gov.cn'

        def _queue_item(li, link_xp, title_xp, date_xp=None):
            # Build one article task from a list item; skip non-htm links.
            href = li.xpath(link_xp).extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                return
            temp = info_dicts.copy()
            # The queued article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid is the final path segment without its file extension.
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99578'
            pub_date = li.xpath(date_xp).extract_first().strip() if date_xp else ''
            article_json = {"url": url,
                            "title": li.xpath(title_xp).extract_first().strip(),
                            "pub_date": pub_date}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        if 'zfb/xzgfxwj' in callmodel.sql_model.list_rawid:
            # Normative-documents layout: no publish date on the list page.
            for li in res.xpath('//ul[@id="dataList"]/li'):
                _queue_item(li, 'div[1]//a/@href', 'div[1]//a/text()')
        else:
            for li in res.xpath('//ul[@class="text-big"]/li|//div[@class="gkmus2-1"]/ul/li'):
                _queue_item(li, 'a/@href', 'a/text()', 'span/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_nujiangarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Nujiang; no extra scheduling, just an empty deal."""
    return DealModel()


def policy_nujiangarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Nujiang prefecture article pages.

    Parses the downloaded article HTML, extracts document metadata from the
    "referencebox" header table, and assembles rows for the `policy_latest` /
    `policy_fulltext_latest` tables; attachment info is written back onto the
    source row via a before-update.

    :param callmodel: wrapper holding the downloaded page and the sql row
    :raises Exception: when the fulltext container cannot be located
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="panel-body"]//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"文") and contains(text(),"号")]/following::dd[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"索引号")]/following::dd[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"主题分类")]/following::dd[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="referencebox"]//dt[contains(text(),"发文机构")]/following::dd[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ; qualify it with the prefecture name.
    if organ.startswith('市'):
        organ = '怒江' + organ

    fulltext_xpath = '//div[@id="content"]|//div[@id="zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99578'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "NUJIANG"
    zt_provider = "nujianggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['subject'] = subject

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省大理白族自治州 (Dali Bai Autonomous Prefecture, Yunnan)
def policy_dalilist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Dali prefecture (http://www.dali.gov.cn).

    On page 1, fans out tasks for the remaining list pages (this site pages via
    an "_<n>" suffix); on every page, extracts article links (two layouts) and
    queues them under the next task tag for the article stage.

    :param callmodel: wrapper holding the downloaded page, sql row and redis info
    :return: DealModel carrying follow-up list tasks and queued article tasks
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # The page count lives in a createPageHTML('page-div',N,...) JS call.
        max_count = re.findall(r"createPageHTML\('page-div',(\d+)", para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                sql_dict["list_json"] = json.dumps({"page_info": f"_{page}"}, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        base_url = 'http://www.dali.gov.cn'

        def _queue_item(li, link_xp, title_xp, date_xp=None):
            # Build one article task from a list item; skip non-htm links.
            href = li.xpath(link_xp).extract_first()
            url = href if 'http' in href else base_url + href
            if 'htm' not in url:
                return
            temp = info_dicts.copy()
            # The queued article task runs under the *next* stage's tag.
            temp["task_tag"] = temp.pop("task_tag_next")
            # rawid is the final path segment without its file extension.
            temp["rawid"] = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
            temp["sub_db_id"] = '99580'
            pub_date = li.xpath(date_xp).extract_first().strip() if date_xp else ''
            article_json = {"url": url,
                            "title": li.xpath(title_xp).extract_first().strip(),
                            "pub_date": pub_date}
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        if 'dlrmzf/xxgkml/c105889/gfxwj' in callmodel.sql_model.list_rawid:
            # Normative-documents layout: no publish date on the list page.
            for li in res.xpath('//div[@class="gkgz_list_content"]/ul/li'):
                _queue_item(li, 'div[2]//a/@href', 'div[2]//a/text()')
        else:
            for li in res.xpath('//ul[@class="padd15"]/li'):
                _queue_item(li, 'a/@href', 'a/text()', 'span/text()')
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_daliarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Dali; no extra scheduling, just an empty deal."""
    return DealModel()


def policy_daliarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Dali prefecture article pages.

    Parses the downloaded article HTML, extracts document metadata from the
    "htw_div" header block (written date arrives as a millisecond epoch
    timestamp; validity comes from a `_IS_FZ` JS variable), and assembles rows
    for the `policy_latest` / `policy_fulltext_latest` tables. Attachment info
    is written back onto the source row via a before-update.

    :param callmodel: wrapper holding the downloaded page and the sql row
    :raises Exception: when the fulltext container cannot be located
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//h1/text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//span[@id="fwzh"]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="htw_div"]//b[contains(text(),"索引号")]/parent::li[1]/text()').extract()).strip()
    subject = ''.join(res.xpath('//div[@class="htw_div"]//b[contains(text(),"主题分类")]/parent::li[1]/text()').extract()).strip()
    subject_word = ''.join(res.xpath('//div[@class="htw_div"]//b[contains(text(),"主题词")]/parent::li[1]/text()').extract()).strip()
    date_str = ''.join(res.xpath('//span[@class="cwrq"]/text()').extract()).strip()
    if date_str:
        # The written date is embedded as a millisecond Unix timestamp.
        date_stamp = datetime.datetime.fromtimestamp(int(date_str) / 1000)
        written_date = datetime.datetime.strftime(date_stamp, "%Y-%m-%d %H:%M:%S")
    else:
        written_date = ''
    # Validity flag lives in an inline JS variable, not in the DOM.
    ls = re.findall(r'_IS_FZ = "(.*?)"', html)
    legal_status = "有效" if ls and ls[0] == "有效" else ""
    organ = ''.join(res.xpath('//div[@class="htw_div"]//b[contains(text(),"发布机构")]/parent::li[1]/text()').extract()).strip()
    # The site abbreviates the issuing organ; qualify it with the prefecture name.
    if organ.startswith('州'):
        organ = '大理白族自治' + organ

    fulltext_xpath = '//div[@id="NewsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99580'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DALI"
    zt_provider = "daligovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject'] = subject
    data['subject_word'] = subject_word
    data['legal_status'] = legal_status

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省楚雄彝族自治州 (Chuxiong Yi Autonomous Prefecture, Yunnan)
def policy_cxzlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Chuxiong prefecture (http://www.cxz.gov.cn).

    On page 1, fans out tasks for the remaining list pages (pagination format
    depends on the list section); on every page, extracts article links from
    one of three page layouts and queues them under the next task tag.

    :param callmodel: wrapper holding the downloaded page, sql row and redis info
    :return: DealModel carrying follow-up list tasks and queued article tasks
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Page count comes from a list_gotopage_fun(N, ...) JS call.
        max_count = re.findall(r'list_gotopage_fun\((\d+)', para_dicts["data"]["1_1"]['html'])
        total_page = int(max_count[0]) if max_count else 1
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: schedule list tasks for pages 2..total_page.
            sql_dict = deal_sql_dict(callmodel.sql_model.dict())
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                if 'dtyw/tzgg' in callmodel.sql_model.list_rawid:
                    # Notice section counts pages backwards in the URL path.
                    dic = {"page_info": f"/{total_page - page + 1}.htm"}
                else:
                    dic = {"page_info": f"&PAGENUM={page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'wbtreeid=3494' in callmodel.sql_model.list_rawid:
            # Table layout; skip the header row.
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                # The queued article task runs under the *next* stage's tag.
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('td[2]/span/a/@href').extract_first()
                base_url = 'http://www.cxz.gov.cn'
                url = href if 'http' in href else base_url + href
                # This layout carries the id in a query parameter.
                rawid = url.split('&indentifier=')[-1].strip()
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99581'
                article_json = {"url": url,
                                "title": li.xpath('td[2]/span/a/text()').extract_first().strip(),
                                "pub_date": ''}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'dtyw/tzgg' in callmodel.sql_model.list_rawid:
            li_list = res.xpath('//div[@class="listbox"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('a/@href').extract_first()
                # Relative links resolve against the current list page URL.
                if page_index == 1:
                    base_url = f'http://www.cxz.gov.cn/{callmodel.sql_model.list_rawid}.htm'
                else:
                    base_url = f'http://www.cxz.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid is the final path segment without its file extension.
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99581'
                article_json = {"url": url,
                                "title": li.xpath('a/text()').extract_first().strip(),
                                "pub_date": li.xpath('span/text()').extract_first().strip()}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            li_list = res.xpath('//div[@class="gkzd-box_l"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp.pop("task_tag_next")
                href = li.xpath('a[1]/@href').extract_first()
                base_url = f'http://www.cxz.gov.cn/{callmodel.sql_model.list_rawid}&PAGENUM=1'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall(r'(.*?)\.', url.split('/')[-1])[0]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99581'
                article_json = {"url": url,
                                "title": li.xpath('a[1]/text()').extract_first().strip(),
                                "pub_date": li.xpath('span[1]/text()').extract_first().strip()}
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_cxzarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-page callback for Chuxiong; no extra scheduling, just an empty deal."""
    return DealModel()


def policy_cxzarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chuxiong prefecture article pages.

    Parses the downloaded article HTML, extracts document metadata from the
    "cen5" header table (fields arrive as "label：value" text, split on the
    full-width colon), and assembles rows for the `policy_latest` /
    `policy_fulltext_latest` tables. Attachment info is written back onto the
    source row via a before-update.

    :param callmodel: wrapper holding the downloaded page and the sql row
    :raises Exception: when the fulltext container cannot be located
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the on-page title; fall back to the title captured on the list page.
    title = ''.join(res.xpath('//div[@class="gztit"]//text()|//h3[@class="title"]//text()|//h1[@class="xxgktit"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@id="cen5"]//td[contains(text(),"文") and contains(text(),"号：")]/text()').extract()).strip()
    pub_no = pub_no.split('：')[-1]
    index_no = ''.join(res.xpath('//div[@id="cen5"]//td[contains(text(),"索") and contains(text(),"号：")]/text()').extract()).strip()
    index_no = index_no.split('：')[-1]
    subject_word = ''.join(res.xpath('//div[@id="cen5"]//td[contains(text(),"主题词：")]/text()').extract()).strip()
    subject_word = subject_word.split('：')[-1]
    written_date = ''.join(res.xpath('//div[@id="cen5"]//td[contains(text(),"成文日期：")]/text()').extract()).strip()
    written_date = written_date.split('：')[-1]
    organ = ''.join(res.xpath('//div[@id="cen5"]//td[contains(text(),"发布机构：")]/text()').extract()).strip()
    organ = organ.split('：')[-1]
    # The site abbreviates the issuing organ; qualify it with the prefecture name.
    if organ.startswith('州'):
        organ = '云南省楚雄彝族自治' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found via {fulltext_xpath}: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99581'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CXZ"
    # Fixed: was "daligovpolicy", a copy-paste from the Dali handler; every
    # other handler pairs the product code with its own provider tag.
    zt_provider = "cxzgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)
    print(lngid)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['written_date'] = clean_pubdate(written_date)
    data['subject_word'] = subject_word

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Persist attachment info (or an empty JSON object) on the source row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    other_dicts = json.dumps(file_info, ensure_ascii=False) if file_info else "{}"
    di_model_bef.update.update({"other_dicts": other_dicts})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省红河哈尼族彝族自治州 (Honghe Hani and Yi Autonomous Prefecture, Yunnan)
def policy_hhlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Honghe prefecture (红河州) policy pages.

    On the first page (page_index == 0) the JS paginator in the HTML is read
    for the total page count and one list task per remaining page is queued.
    The current page's items are then parsed into next-stage (article) task
    rows, with a layout branch per list section.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Shared template for article rows; task_tag_next replaces task_tag below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from createPageHTML(...)/createPage(...) JS calls.
        max_count = re.findall('createPageHTML\((\d+)', para_dicts["data"]["1_1"]['html'])
        if not max_count:
            max_count = re.findall('createPage\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out list tasks for the remaining pages.
            # NOTE(review): range stops at total_page (exclusive) — assumes
            # 0-based page numbering on this site; confirm against the paginator.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                dic = {"page_info": f"{list_json['page_info']}_{page}"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())  # copy: sql_dict is reused each iteration
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        if 'zfxxgk/fdzdgknr/zfwj/zfwj' in callmodel.sql_model.list_rawid:
            # Government-documents section uses a table layout; skip header row.
            li_list = res.xpath('//table[@class="py-table-fixed py-table1"]/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/div/a/@href').extract_first()
                base_url = f'http://www.hh.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                # rawid = the article filename without its extension.
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('&indentifier=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99582'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/div/a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Plain <ul>-based list layouts.
            li_list = res.xpath('//ul[@class="md-lists"]/li|//div[@class="zfxxgk_zdgkc"]/ul/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                base_url = f'http://www.hh.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99582'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()|b/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_hharticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Honghe (红河州); parsing is deferred to the ETL step."""
    return DealModel()


def policy_hharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Honghe prefecture (红河州) policy article pages.

    Extracts metadata (title, publish date, document number, index number,
    issuing organ, document category) and the full text from the fetched HTML,
    queues rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables,
    and writes any attachment info back onto the originating task row.

    :param callmodel: platform callback model carrying the fetched HTML and
        the originating task row.
    :raises Exception: if the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the article page; fall back to the
    # title captured on the list page.
    title = ''.join(res.xpath('//div[@class="xl-main-text"]//h1//text()|//h1[@class="title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//div[@class="xl-main-table"]//div[contains(text(),"文号")]/following::div[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//div[@class="xl-main-table"]//div[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    raw_type = ''.join(res.xpath('//div[@class="xl-main-table"]//div[contains(text(),"信息分类")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//div[@class="xl-main-table"]//div[contains(text(),"发布机构")]/following::div[1]/text()').extract()).strip()
    # Expand abbreviated "州..." organ names to the full prefecture prefix.
    if organ.startswith('州'):
        organ = '云南省红河哈尼族彝族自治' + organ

    fulltext_xpath = '//div[@class="xl-main-text"]|//div[@id="zoom"]|//div[@id="Zoom"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry context instead of the original bare ``raise Exception``.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99582'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "HH"
    zt_provider = "hhgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no
    data['raw_type'] = raw_type

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省文山壮族苗族自治州
def policy_ynwslist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Wenshan prefecture (文山州) policy pages.

    On the first page (page_index == 0) the JS paginator is read for the
    total page count and one list task per remaining page is queued; this
    site numbers pages in reverse, hence ``total_page - page`` in page_info.
    The current page's items are then parsed into next-stage (article) task
    rows, with a layout branch per list section.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Shared template for article rows; task_tag_next replaces task_tag below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count from the list_gotopage_fun(...) JS paginator.
        max_count = re.findall('list_gotopage_fun\((\d+)', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 0:
            # First page only: fan out list tasks for the remaining pages.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # Reverse page numbering on this site.
                dic = {"page_info": f"/{total_page - page}.htm"}
                sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                # sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())  # copy: sql_dict is reused each iteration
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # Base URL for resolving relative hrefs differs by pagination depth.
        # NOTE(review): this tests page_index == 1, not 0 — confirm intended.
        if page_index == 1:
            base_url = f'http://www.ynws.gov.cn/{callmodel.sql_model.list_rawid}.htm'
        else:
            base_url = f'http://www.ynws.gov.cn/{callmodel.sql_model.list_rawid}/1.htm'
        if 'zfxxgk/zcwj2/xzgfxwj' in callmodel.sql_model.list_rawid:
            # Administrative normative documents: table layout, skip header row.
            li_list = res.xpath('//table[@class="tb1"]/tbody/tr')[1:]
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/span/a/@href').extract_first()
                # base_url = f'http://www.lsz.gov.cn/{callmodel.sql_model.list_rawid}/index.html'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                # rawid = the article filename without its extension.
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('&indentifier=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99583'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/span/a/@title').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zfxxgk/zcwj2/qtwj' in callmodel.sql_model.list_rawid:
            # "Other documents" section: plain <ul class="list"> layout.
            li_list = res.xpath('//ul[@class="list"]/li')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                # base_url = f'http://www.baoshan.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('indentifier=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99583'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/@title').extract_first().strip()
                article_json["pub_date"] = li.xpath('span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        elif 'zfxxgk/zdgkxxml/zcjd' in callmodel.sql_model.list_rawid:
            # Policy-interpretation section: <ul class="zcjd"> layout.
            li_list = res.xpath('//ul[@class="zcjd"]/li|//ul[@class="zcjd"]/span')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('a/@href').extract_first()
                # base_url = f'http://www.baoshan.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('indentifier=')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99583'
                article_json["url"] = url
                article_json["title"] = li.xpath('a/text()').extract_first().strip()
                article_json["pub_date"] = ''
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        else:
            # Default: rows rendered as <tr id="line...">.
            li_list = res.xpath('//tr[contains(@id,"line")]')
            for li in li_list:
                temp = info_dicts.copy()
                temp["task_tag"] = temp["task_tag_next"]
                del temp["task_tag_next"]
                article_json = dict()
                href = li.xpath('td[2]/a/@href').extract_first()
                # base_url = f'http://www.zt.gov.cn'
                url = parse.urljoin(base_url, href)
                if 'htm' not in url:
                    continue
                rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
                # rawid = url.split('/')[-1]
                temp["rawid"] = rawid
                temp["sub_db_id"] = '99583'
                article_json["url"] = url
                article_json["title"] = li.xpath('td[2]/a//text()').extract_first().strip()
                article_json["pub_date"] = li.xpath('td[3]/span/text()').extract_first().strip()
                temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
                di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_ynwsarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Wenshan (文山州); parsing is deferred to the ETL step."""
    return DealModel()


def policy_ynwsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Wenshan prefecture (文山州) policy article pages.

    Extracts metadata (title, publish date, document number, index number,
    issuing organ) and the full text from the fetched HTML, queues rows for
    the ``policy_latest`` / ``policy_fulltext_latest`` tables, and writes any
    attachment info back onto the originating task row.

    :param callmodel: platform callback model carrying the fetched HTML and
        the originating task row.
    :raises Exception: if the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Title fallbacks: page heading -> ArticleTitle meta tag -> list-page title.
    title = ''.join(res.xpath('//div[@class="gztit"]//text()|//h3[@class="title"]//text()|//h1[@class="xxgktit"]//text()').extract()).strip()
    if not title:
        title = ''.join(res.xpath('//meta[@name="ArticleTitle"]/@content').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Publish-date fallbacks: disclosure table, then the byline bar.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//td[@class="title_govinfocontent"]/ancestor::table[1]//td[contains(text(),"公开日期")]/following::td[1]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//div[@class="bar"]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]

    pub_no = ''.join(res.xpath('//td[@class="title_govinfocontent"]/ancestor::table[1]//td[contains(text(),"文　　号")]/following::td[1]/text()').extract()).strip()
    index_no = ''.join(res.xpath('//td[@class="title_govinfocontent"]/ancestor::table[1]//td[contains(text(),"索")]/following::td[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//td[@class="title_govinfocontent"]/ancestor::table[1]//td[contains(text(),"来源")]/following::td[1]/text()').extract()).strip()
    # Expand abbreviated "州..." organ names to the full prefecture prefix.
    if organ.startswith('州'):
        organ = '云南省文山壮族苗族自治' + organ

    fulltext_xpath = '//div[contains(@id,"vsb_content")]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry context instead of the original bare ``raise Exception``.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99583'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "YNWS"
    zt_provider = "ynwsgovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


#   云南省西双版纳傣族自治州
def policy_xsbnlist_callback(callmodel: CallBackModel[PolicyListModel]) -> DealModel:
    """List-page callback for Xishuangbanna prefecture (西双版纳州) policy pages.

    On the first page (page_index == 1 — this site is 1-based) the paginator
    is read for the total page count and one list task per remaining page is
    queued.  The current page's items are parsed into next-stage (article)
    task rows; the rawid is the ``news_id`` query parameter of each URL.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Shared template for article rows; task_tag_next replaces task_tag below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        # Total page count parsed from the "current/total" paginator markup.
        max_count = re.findall('>.*?/(\d+)&', para_dicts["data"]["1_1"]['html'])
        max_count = int(max_count[0]) if max_count else 1
        total_page = max_count
        page_index = int(callmodel.sql_model.page_index)
        if page_index == 1:
            # First page only: fan out list tasks for pages 2..total_page.
            sql_dict = callmodel.sql_model.dict()
            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            sql_dict = deal_sql_dict(sql_dict)
            # NOTE(review): list_json is loaded here but never used below.
            list_json = json.loads(callmodel.sql_model.list_json)
            for page in range(page_index + 1, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                # dic = {"page_info": f"/{total_page - page}.htm"}
                # sql_dict["list_json"] = json.dumps(dic, ensure_ascii=False)
                sql_dict["list_json"] = callmodel.sql_model.list_json
                di_model_bef.lists.append(sql_dict.copy())  # copy: sql_dict is reused each iteration
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        res = Selector(text=para_dicts["data"]["1_1"]['html'])
        # if 'zfxxgk/zcwj2/xzgfxwj' in callmodel.sql_model.list_rawid:
        li_list = res.xpath('//ul[@id="newslist"]/li')
        for li in li_list:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            article_json = dict()
            href = li.xpath('a/@href').extract_first()
            base_url = f'https://www.xsbn.gov.cn/{callmodel.sql_model.list_rawid}.news.list.dhtml'
            url = parse.urljoin(base_url, href)
            if 'htm' not in url:
                continue
            # rawid = re.findall('(.*?)\.', url.split('/')[-1])[0]
            rawid = url.split('news_id=')[-1].strip()
            temp["rawid"] = rawid
            temp["sub_db_id"] = '99584'
            article_json["url"] = url
            article_json["title"] = li.xpath('a/@title').extract_first().strip()
            article_json["pub_date"] = li.xpath('span[1]/text()').extract_first().strip()
            temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)
        result.next_dicts.insert.append(di_model_next)

    return result


def policy_xsbnarticle_callback(callmodel: CallBackModel[PolicyArticleModel]) -> DealModel:
    """Article-stage callback for Xishuangbanna (西双版纳州); parsing is deferred to the ETL step."""
    return DealModel()


def policy_xsbnarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Xishuangbanna prefecture (西双版纳州) policy article pages.

    Extracts the title, publish date and full text from the fetched HTML,
    queues rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables,
    and writes any attachment info back onto the originating task row.

    :param callmodel: platform callback model carrying the fetched HTML and
        the originating task row.
    :raises Exception: if no publish date or no fulltext container is found.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//p[contains(@class,"content_title")]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()
    # Fall back to the on-page publish time; a missing date is fatal here.
    if not pub_date:
        pub_date_info = ''.join(res.xpath('//li[contains(text(),"发布时间:")]/text()').extract()).strip()
        pub_date = clean_pubdate(pub_date_info)
        pub_year = pub_date[:4]
    if not pub_date:
        # Carry context instead of the original bare ``raise Exception``.
        raise Exception(f'pub_date not found: {provider_url}')

    fulltext_xpath = '//div[@id="newsContent"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99584'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "XSBN"
    zt_provider = "xsbngovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result




def policy_chengduarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Chengdu (成都市) policy article pages.

    Extracts metadata (title, publish date, document number, signing date,
    effective date, issuing organ) and the full text from the fetched HTML,
    queues rows for the ``policy_latest`` / ``policy_fulltext_latest`` tables,
    and writes any attachment info back onto the originating task row.

    :param callmodel: platform callback model carrying the fetched HTML and
        the originating task row.
    :raises Exception: if the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="blk01"]/ul/h1//text()|//div[@class="detai_title"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    # Two page templates: a bordered metadata table vs. inline label spans.
    if 'table table-bordered' in html:
        pub_no = ''.join(res.xpath('//table[@class="table table-bordered"]//strong[contains(text(),"文　　号：")]/parent::td/text()').extract()).strip()
        written_date = ''.join(res.xpath('//table[@class="table table-bordered"]//strong[contains(text(),"签发时间：")]/parent::td/text()').extract()).strip()
        impl_date = ''.join(res.xpath('//table[@class="table table-bordered"]//strong[contains(text(),"生效时间：")]/parent::td/text()').extract()).strip()
        organ = ''.join(res.xpath('//table[@class="table table-bordered"]//strong[contains(text(),"签发单位：")]/parent::td/text()').extract()).strip()
    else:
        pub_no = ''.join(res.xpath('//b[text()="文　　号："]/parent::span[@class="l1"]/text()').extract()).strip()
        written_date = ''.join(res.xpath('//b[text()="成文日期："]/parent::span[@class="l1"]/text()').extract()).strip()
        impl_date = ''.join(res.xpath('//b[text()="生效时间："]/parent::span[@class="l1"]/text()').extract()).strip()
        organ = ''.join(res.xpath('//b[text()="签发单位："]/parent::span[@class="l1"]/text()').extract()).strip()
    # Expand abbreviated "市..." organ names to the full city prefix.
    if organ.startswith('市'):
        organ = '成都' + organ

    fulltext_xpath = '//td[@id="myFont"]|//div[@class="text_content"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry context instead of the original bare ``raise Exception``.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99538'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "CHENGDU"
    zt_provider = "chengdugovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['written_date'] = clean_pubdate(written_date)
    data['impl_date'] = clean_pubdate(impl_date)

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result


def policy_diqingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for Diqing prefecture (迪庆州) policy article pages.

    Extracts metadata (title, publish date, document number, index number,
    issuing organ) and the full text from the fetched HTML, queues rows for
    the ``policy_latest`` / ``policy_fulltext_latest`` tables, and writes any
    attachment info back onto the originating task row.

    :param callmodel: platform callback model carrying the fetched HTML and
        the originating task row.
    :raises Exception: if the fulltext container cannot be located.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    article_json = json.loads(callmodel.sql_model.article_json)
    provider_url = article_json['url']
    pub_date = clean_pubdate(article_json['pub_date'])
    pub_year = pub_date[:4]
    res = Selector(text=html)

    # Prefer the title rendered on the page; fall back to the list-page title.
    title = ''.join(res.xpath('//div[@class="xxgk-articleTitle"]//text()').extract()).strip()
    if not title:
        title = article_json['title'].strip()

    pub_no = ''.join(res.xpath('//dl[@class="xxgk-reference"]//div[contains(text(),"文") and contains(text(),"号")]/following::div[1]/text()').extract()).strip()
    # '无' ("none") is a placeholder meaning no document number.
    if pub_no == '无':
        pub_no = ''
    index_no = ''.join(res.xpath('//dl[@class="xxgk-reference"]//div[contains(text(),"索引号")]/following::div[1]/text()').extract()).strip()
    organ = ''.join(res.xpath('//dl[@class="xxgk-reference"]//div[contains(text(),"来") and contains(text(),"源")]/following::div[1]/text()').extract()).strip()
    # BUGFIX: the original tested ``pub_no == '本站'`` here, but '本站'
    # ("this site") is a placeholder value of the source/organ field that was
    # just extracted — clear the organ instead of the (already cleaned) pub_no.
    if organ == '本站':
        organ = ''
    # Expand abbreviated "州..." organ names to the full prefecture prefix.
    if organ.startswith('州'):
        organ = '迪庆藏族自治' + organ

    fulltext_xpath = '//div[@class="xxgk-articleBox"]'
    fulltext = res.xpath(fulltext_xpath).extract_first()
    if not fulltext:
        # Carry context instead of the original bare ``raise Exception``.
        raise Exception(f'fulltext not found: {provider_url}')

    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '99579'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    product = "DIQING"
    zt_provider = "diqinggovpolicy"
    data = init_data(rawid, lngid, sub_db_id, down_date_str, product, zt_provider)

    data['title'] = title
    data['provider_url'] = provider_url
    data['pub_date'] = pub_date
    data['pub_year'] = pub_year
    data['pub_no'] = pub_no
    data['organ'] = organ
    data['index_no'] = index_no

    save_data.append({'table': 'policy_latest', 'data': data})
    full_text_data = init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year)
    save_data.append({'table': 'policy_fulltext_latest', 'data': full_text_data})
    result.save_data = save_data

    # Record attachment info (or an empty JSON object) on the task row.
    file_info = get_file_info(data, res, f'({fulltext_xpath})')
    di_model_bef = DealUpdateModel()
    di_model_bef.update.update(
        {"other_dicts": json.dumps(file_info, ensure_ascii=False) if file_info else "{}"})
    di_model_bef.where.update({"rawid": callmodel.sql_model.rawid, "task_tag": callmodel.sql_model.task_tag,
                               "task_name": callmodel.sql_model.task_name})
    result.befor_dicts.update_list.append(di_model_bef)
    return result