import binascii
import json
import time

import re
from math import ceil
from urllib import parse
from urllib.parse import unquote

import mmh3
from parsel import Selector
from re_common.baselibrary.utils.basetime import BaseTime

from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, JournalListModel, JournalIssueModel, \
    JournalArticleModel, DealInsertModel, JournalHomeModel, EtlDealModel, DealUpdateModel, OtherArticleModel

# Public callback entry points exposed to the crawler platform, grouped as
# list / article / etl triples per upstream report source.
__all__ = [
    "cqvipqyreport_eastmoneyindustrylist_callback",
    "cqvipqyreport_eastmoneyindustryarticle_callback",
    "cqvipqyreport_eastmoneyindustryarticle_etl_callback",
    "cqvipqyreport_eastmoneynewstocklist_callback",
    "cqvipqyreport_eastmoneynewstockarticle_callback",
    "cqvipqyreport_eastmoneynewstockarticle_etl_callback",
    "cqvipqyreport_eastmoneymacresearchlist_callback",
    "cqvipqyreport_eastmoneymacresearcharticle_callback",
    "cqvipqyreport_eastmoneymacresearcharticle_etl_callback",
    "cqvipqyreport_iresearchreportlist_callback",
    "cqvipqyreport_iresearchreportarticle_callback",
    "cqvipqyreport_iresearchreportarticle_etl_callback",
    "cqvipqyreport_wkaskcilist_callback",
    "cqvipqyreport_wkaskciarticle_callback",
    "cqvipqyreport_wkaskciarticle_etl_callback",
    "cqvipqyreport_baogaotinglist_callback",
    "cqvipqyreport_baogaotingarticle_callback",
    "cqvipqyreport_baogaotingarticle_etl_callback",
]


# 函数区 ******************************************************
def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a task-row dict before re-insertion.

    Mutates ``sql_dict`` in place and also returns it.  Uses ``pop`` with a
    default so a row that is missing one of the columns no longer raises
    ``KeyError`` (the original popped unconditionally).
    """
    for key in ("id", "update_time", "create_time", "null_dicts",
                "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(key, None)
    return sql_dict

def make_url(href, base_url):
    """Resolve a (possibly relative) href against base_url."""
    return parse.urljoin(base_url, href)

def solve_date(date_str):
    """Extract the first ``YYYY-MM-DD`` substring from *date_str*.

    Returns the input unchanged when no such substring is found.  The regex
    is now a raw string (the original ``"\\d{4}-..."`` relied on invalid
    escape sequences, which raise a warning on modern CPython).
    """
    match = re.search(r"\d{4}-\d{2}-\d{2}", date_str)
    return match.group(0) if match else date_str


def judge_pdf(pdf_url):
    """Classify a link: ("pdf", url) for .pdf/.PDF, else ("attach", [url])."""
    if pdf_url.endswith(("pdf", "PDF")):
        return "pdf", pdf_url
    return "attach", [pdf_url]

def format_date(date_str, input_rule, output_rule):
    """Re-format *date_str* from the *input_rule* layout into *output_rule*."""
    converter = BaseTime()
    parsed = converter.string_to_datetime(date_str, format=input_rule)
    return converter.datetime_to_string(parsed, format=output_rule)

def check_none(data):
    """Return *data* unchanged, or the empty string when it is None."""
    return "" if data is None else data

def checkExist(obj):
    """True when *obj* is not None and non-empty (obj must support len())."""
    return obj is not None and len(obj) > 0



def ext_name_format(ext_name):
    """Map a raw file extension to the coarse internal file-type label.

    Matching is case-sensitive, exactly as the original elif chain behaved.
    Executable/script extensions deliberately map to the empty string;
    anything unrecognised maps to "other".
    """
    type_table = (
        ("pdf", ("pdf",)),
        ("pic", ("gif", "jpg", "jpeg", "png", "bmp", "tif", "emf", "tiff", "eps")),
        ("txt", ("txt",)),
        ("xml", ("xml",)),
        ("ofd", ("ofd",)),
        ("rtf", ("rtf",)),
        ("html", ("html", "mhtml", "htm")),
        ("doc", ("doc", "docx", "docm", "dotx", "dotm")),
        ("xls", ("xls", "xlsx", "xlm", "xlsm")),
        ("ppt", ("ppt", "pptx")),
        ("zip", ("zip", "rar", "arj", "7z", "gzip", "gz", "bz", "bz2", "lz4",
                 "tar", "taz", "rpm", "lzo", "wmz")),
        ("audio", ("cda", "wav", "mp3", "aif", "aiff", "mid", "wma", "ra", "vqf", "ape")),
        ("video", ("avi", "wmv", "mpg", "mpeg", "mov", "rm", "ram", "swf", "flv", "mp4", "rmvb")),
        ("wps", ("wps", "et")),
        ("typeset", ("fbd", "fit", "indd", "cdr", "ceb", "cebx")),
        # Executables / scripts: intentionally no usable file type.
        ("", ("exe", "dll", "scr", "com", "vb", "vbs", "js", "bat", "sh")),
    )
    for file_type, extensions in type_table:
        if ext_name in extensions:
            return file_type
    return "other"




def init_full_text_data(lngid, sub_db_id, down_date_str, fulltext, pub_year):
    """Build a fulltext record for one article.

    ``latest_date`` is the first 8 chars (YYYYMMDD) of *down_date_str*,
    while ``batch`` keeps the full string.  source_type/page_cnt are fixed.
    """
    return {
        'lngid': lngid,
        'keyid': lngid,
        'sub_db_id': sub_db_id,
        'source_type': '12',
        'latest_date': down_date_str[:8],
        'batch': down_date_str,
        'is_deprecated': '0',
        'filename': f"{lngid}.html",
        'fulltext_type': "html",
        'fulltext_addr': '',
        'fulltext_size': '',
        'fulltext_txt': fulltext,
        'page_cnt': "1",
        'pub_year': pub_year,
    }


def addr_hash(filename):
    """Derive a 3-character shard directory name from *filename*.

    Takes the first three uppercase hex characters of the mmh3 digest, then
    remaps the third character so it stays within the historical shard set:
    D/E/F fold onto A/B/C, and digits fold modulo 5.
    """
    digest = binascii.b2a_hex(mmh3.hash_bytes(filename)).upper()
    chars = [chr(byte) for byte in digest[:3]]
    fold = {"D": "A", "E": "B", "F": "C"}
    last = chars[2]
    if last in fold:
        chars[2] = fold[last]
    elif last.isdigit():
        chars[2] = str(int(last) % 5)
    return ''.join(chars)


def transform(pub_year, filename):
    """Build the storage path: report_file_new/<year>/<shard>/<filename>."""
    return f"report_file_new/{pub_year}/{addr_hash(filename)}/{filename}"


def solve_indust(data):
    """Map a second-level industry name to its first-level industry category.

    The mapping table was curated by hand against the upstream site's
    industry labels; names mapped to "" (board/exchange names such as
    沪市主板) have no meaningful first-level category.  Unknown names also
    return "".
    """
    data_dict = {
        "暂无分类": "",
        "文化传媒": "传媒娱乐",
        "医药制造": "生命健康",
        "汽车行业": "先进制造",
        "电子信息": "信息技术",
        "输配电气": "先进制造",
        "电子元件": "先进制造",
        "化工行业": "传统产业",
        "食品饮料": "食品消费",
        "机械行业": "先进制造",
        "房地产": "地产金融",
        "通讯行业": "信息技术",
        "银行": "地产金融",
        "商业百货": "传统产业",
        "有色金属": "传统产业",
        "券商信托": "地产金融",
        "家电行业": "传统产业",
        "环保工程": "节能环保",
        "旅游酒店": "食品消费",
        "航天航空": "先进制造",
        "农牧饲渔": "传统产业",
        "深市创业板": "",
        "钢铁行业": "传统产业",
        "造纸印刷": "传统产业",
        "交运物流": "传统产业",
        "水泥建材": "传统产业",
        "公用事业": "传统产业",
        "煤炭采选": "传统产业",
        "纺织服装": "传统产业",
        "创业版": "",
        "多元金融": "地产金融",
        "石油行业": "传统产业",
        "电力行业": "传统产业",
        "工程建设": "传统产业",
        "沪市科创板": "",
        "保险": "地产金融",
        "房地产开发": "地产金融",
        "文教休闲": "传统产业",
        "装修装饰": "传统产业",
        "企业服务": "传统产业",
        "香港交易所": "",
        "媒体文娱": "传媒娱乐",
        "科创版": "",
        "化学制药": "食品消费",
        "化学制品": "传统产业",
        "互联网服务": "信息科技",
        "汽车整车": "先进制造",
        "消费电商": "食品消费",
        "汽车零部件": "先进制造",
        "通信设备": "信息科技",
        "医疗行业": "生命健康",
        "沪市主板": "",
        "煤炭行业": "传统产业",
        "通用设备": "传统产业",
        "深市主板": "",
        "材料行业": "先进制造",
        "医药商业": "生命健康",
        "计算机设备": "信息科技",
        "金融行业": "地产金融",
        "光伏设备": "节能环保",
        "化学原料": "传统产业",
        "电源设备": "传统产业",
        "电网设备": "传统产业",
        "环保行业": "节能环保",
        "家用轻工": "传统产业",
        "证券": "地产金融",
        "上交所": "",
        "交运设备": "传统产业",
        "深交所创业板": "",
        "半导体": "先进制造",
        "专用设备": "先进制造",
        "电池": "节能环保",
        "广告营销": "传媒娱乐",
        "游戏行业": "传媒娱乐",
        "医疗服务": "生命健康",
        "物流行业": "传统产业",
        "生物制品": "生命健康",
        "木业家具": "传统产业",
        "深交所中小板": "",
        "装修建材": "传统产业",
        "其他行业": "传统产业",
        "金属制品": "传统产业",
        "中药": "生命健康",
        "教育行业": "食品消费",
        "工程机械": "先进制造",
        "网络服务": "传统产业",
        "美容护理": "食品消费",
        "通信服务": "信息技术",
        "AI大数据": "信息科技",
        "软件开发": "信息科技",
        "消费者洞察": "食品消费",
        "支付行业": "地产金融",
        "医疗器械": "生命健康",
        "珠宝首饰": "食品消费",
        "风电设备": "节能环保",
        "视频媒体": "传媒娱乐",
        "软件服务": "信息技术",
        "医疗健康": "生命健康",
        "应用服务": "信息技术",
        "旅游行业": "食品消费",
        "玻璃陶瓷": "传统产业",
        "港口水运": "传统产业",
        "教育": "食品消费",
        "非金属材料": "传统产业",
        "玻璃玻纤": "节能环保",
        "银行行业": "地产金融",
        "能源金属": "传统产业",
        "化纤行业": "先进制造",
        "综合行业": "传统产业",
        "电池行业": "节能环保",
        "房产行业": "地产金融",
        "航空机场": "传统产业",
        "专业服务": "传统产业",
        "电子商务": "食品消费",
        "证券行业": "地产金融",
        "人工智能": "信息技术",
        "房地产服务": "地产金融",
        "农药兽药": "传统产业",
        "消费电子": "食品消费",
        "深市中小板": "",
        "航运港口": "传统产业",
        "化肥行业": "传统产业",
        "安防设备": "传统产业",
        "游戏": "传媒娱乐",
        "园林工程": "传统产业",
        "光学光电子": "信息技术",
        "民航机场": "先进制造",
        "保险行业": "地产金融",
        "小金属": "传统产业",
        "酿酒行业": "传统产业",
        "电机": "先进制造",
        "先进制造": "先进制造",
        "船舶制造": "先进制造",
        "电信运营": "信息技术",
        "贵金属": "传统产业",
        "工艺商品": "食品消费",
        "燃气": "传统产业",
        "贸易行业": "传统产业",
        "能源环保": "节能环保",
        "铁路公路": "传统产业",
        "高速公路": "传统产业",
        "采掘行业": "传统产业",
        "区块链": "信息技术",
        "仪器仪表": "先进制造",
        "橡胶制品": "传统产业",
        "中药行业": "生命健康",
        "包装材料": "传统产业",
        "汽车服务": "先进制造",
        "塑胶制品": "传统产业",
        "塑料制品": "传统产业",
    }
    # Unknown second-level names fall back to an empty first-level category.
    first_indust = data_dict.get(data, "")
    return first_indust


def create_base_report_dic():
    """Return a fresh report-metadata record with every field blanked.

    Every field defaults to the empty string except ``other_info``, which
    is an independent empty list.  Key insertion order matches the
    historical field order.
    """
    field_order = (
        "keyid", "lngid", "doi", "rawid", "rawid_alt", "provider",
        "product", "sub_db_id", "sub_db", "source_type", "provider_url",
        "latest_date", "is_deprecated", "batch", "down_date", "title",
        "title_alt", "title_series", "keyword", "keyword_alt", "subject",
        "subject_word", "sub_db_class_name", "abstract", "abstract_alt",
        "raw_type", "raw_variety", "pub_date", "page_info", "pub_no",
        "register_no", "std_no", "author_id", "author_1st", "author",
        "author_alt", "author_intro", "fund", "fund_alt", "fund_id",
        "organ_id", "organ", "organ_alt", "sponsor", "pub_year",
        "pub_place", "publisher", "isbn", "is_net1st", "journal_name",
        "journal_name_alt", "raw_source", "country", "language",
        "other_info", "fulltext_type",
    )
    # A fresh list is created on every call, so records never share state.
    return {name: ([] if name == "other_info" else "") for name in field_order}


def creat_base_html_dic():
    """Return a blank html/fulltext record skeleton (all fields empty strings)."""
    field_order = (
        "_id", "filename", "keyid", "lngid", "sub_db_id",
        "raw_source_url", "source_type", "latest_date", "is_deprecated",
        "fulltext_type", "fulltext_addr", "fulltext_size", "fulltext_txt",
        "page_cnt", "pub_year",
    )
    return dict.fromkeys(field_order, "")


# callback区域********************************************************************
def cqvipqyreport_eastmoneyindustrylist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of the EastMoney industry-report list API.

    Page 1 additionally fans out follow-up list tasks for every remaining
    page; each list item becomes one next-stage article task whose rawid
    carries the "_A01" sub-library suffix.
    """
    result = DealModel()

    # rule = re.compile(r"http://(.*?).cma-cmc.com.cn")
    para_dicts = callmodel.para_dicts
    # print(para_dicts)
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Base fields copied onto every generated next-stage task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    # The list response body is JSON stored under the "html" slot.
    data_json = para_dicts['data']['1_1']['html']
    data = json.loads(data_json)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules tasks for pages 2..total_page.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.replace_it
        total_page = ceil(data["hits"] / data["size"])
        sql_dict = callmodel.sql_model.dict()
        sql_dict = deal_sql_dict(sql_dict)
        for page in range(2, int(total_page) + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
            # Flush in chunks so one insert batch stays bounded.
            # NOTE(review): the first chunk uses replace_it but re-created
            # chunks use insert_ig_it — looks intentional (first write wins),
            # but confirm against the sibling list callbacks.
            if len(di_model_bef.lists) > 10000:
                result.befor_dicts.insert.append(di_model_bef)
                di_model_bef = DealInsertModel()
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        result.befor_dicts.insert.append(di_model_bef)
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = data["data"]
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the next-stage tag to be this row's task_tag.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        rawid = li["infoCode"] + "_A01"
        orgName = li["orgName"]
        orgSName = li["orgSName"]
        industryName = li["industryName"]
        title = li["title"]
        author = li["researcher"].replace(",", ";")
        publishDate = li["publishDate"]
        pub_date = solve_date(publishDate)
        url = f'https://data.eastmoney.com/report/zw_industry.jshtml?infocode={li["infoCode"]}'

        temp["rawid"] = rawid
        article_json["orgName"] = orgName
        article_json["orgSName"] = orgSName
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = author
        article_json["pub_date"] = pub_date
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_eastmoneyindustryarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article stage needs no extra list/queue work: return an empty DealModel."""
    return DealModel()




def cqvipqyreport_eastmoneyindustryarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for EastMoney industry reports (sub-library A01).

    Builds one metadata row plus one html fulltext row from the parsed
    article payload and the list-stage ``article_json``, and queues both
    for saving.  Now normalizes an empty "@" subject to "" for consistency
    with the A02/A03 sibling ETL callbacks.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    sub_db_id = "00780"
    datainfo = para_dicts["data"]["1_1"]["data"]
    title = datainfo["title"]
    pdf_url = datainfo["pdf_url"]
    fulltext = datainfo["fulltext"]

    src_data = down_model["1_1"]
    # Accumulate available fulltext kinds: html body and/or the attachment
    # type derived from the pdf link's extension.
    fulltext_type = ""
    if checkExist(fulltext):
        fulltext_type += ";html"
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"

    # down_date "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD".
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is a self-kept field: every report source shares one
    # sub_db_id, so this field (never uploaded to the warehouse) stores a
    # custom sub-library code; the same code is embedded as the rawid suffix.
    data["sub_db_judge"] = "A01"

    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    # Consistency fix: the A02/A03 siblings blank the placeholder "@"
    # produced when both industry levels are empty; do the same here.
    if subject == "@":
        subject = ""
    data["subject"] = subject
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    # Companion html fulltext record keyed by the same lngid.
    html_dic = creat_base_html_dic()
    html_dic["_id"] = f"{lngid}_content.html"
    html_dic["filename"] = f"{lngid}_content.html"
    html_dic["keyid"] = lngid
    html_dic["lngid"] = lngid
    html_dic["sub_db_id"] = sub_db_id
    html_dic["raw_source_url"] = article_json["url"]
    html_dic["source_type"] = "12"
    html_dic["latest_date"] = down_date
    html_dic["is_deprecated"] = "0"
    html_dic["fulltext_type"] = "html"
    html_dic["fulltext_size"] = len(fulltext)
    html_dic["fulltext_txt"] = fulltext
    html_dic["page_cnt"] = 1
    html_dic["pub_year"] = pub_year
    save_data.append({'table': 'cqvipqyreport_fulltext_latest_new', 'data': html_dic})

    result.save_data = save_data
    return result


def cqvipqyreport_eastmoneynewstocklist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of the EastMoney new-stock report list API.

    Page 1 additionally fans out list tasks for every remaining page; each
    list item becomes one next-stage article task whose rawid carries the
    "_A02" sub-library suffix.
    """
    result = DealModel()

    # rule = re.compile(r"http://(.*?).cma-cmc.com.cn")
    para_dicts = callmodel.para_dicts
    # print(para_dicts)
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Base fields copied onto every generated next-stage task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    # The list response body is JSON stored under the "html" slot.
    data_json = para_dicts['data']['1_1']['html']
    data = json.loads(data_json)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules tasks for pages 2..total_page.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.replace_it
        total_page = ceil(data["hits"] / data["size"])
        # print(total_page)
        sql_dict = callmodel.sql_model.dict()
        sql_dict = deal_sql_dict(sql_dict)
        for page in range(2, int(total_page) + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
            # Flush in chunks so one insert batch stays bounded.
            if len(di_model_bef.lists) > 10000:
                result.befor_dicts.insert.append(di_model_bef)
                di_model_bef = DealInsertModel()
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        result.befor_dicts.insert.append(di_model_bef)
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = data["data"]
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the next-stage tag to be this row's task_tag.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        rawid = li["infoCode"] + "_A02"
        orgName = li["orgName"]
        orgSName = li["orgSName"]
        industryName = li["industryName"]
        title = li["title"]
        author = li["researcher"].replace(",", ";")
        publishDate = li["publishDate"]
        pub_date = solve_date(publishDate)
        url = f'https://data.eastmoney.com/report/info/{li["infoCode"]}.html'

        temp["rawid"] = rawid
        article_json["orgName"] = orgName
        article_json["orgSName"] = orgSName
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = author
        article_json["pub_date"] = pub_date
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_eastmoneynewstockarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article stage needs no extra list/queue work: return an empty DealModel."""
    return DealModel()




def cqvipqyreport_eastmoneynewstockarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for EastMoney new-stock reports (sub-library A02).

    Builds one metadata row plus one html fulltext row from the parsed
    article payload and the list-stage ``article_json``, and queues both
    for saving.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    sub_db_id = "00780"
    datainfo = para_dicts["data"]["1_1"]["data"]
    title = datainfo["title"]
    pdf_url = datainfo["pdf_url"]
    fulltext = datainfo["fulltext"]
    # title_alt = datainfo["title_alt"]

    src_data = down_model["1_1"]
    # Accumulate available fulltext kinds: html body and/or the attachment
    # type derived from the pdf link's extension.
    fulltext_type = ""
    if checkExist(fulltext):
        fulltext_type += ";html"
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"
    # down_date "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD".
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    # data["num"] = article_json["num"]
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is a self-kept field: every report source shares one
    # sub_db_id, so this field (never uploaded to the warehouse) stores a
    # custom sub-library code; the same code is embedded as the rawid suffix.
    data["sub_db_judge"] = "A02"

    # data["zt_provider"] = "cmacmcjournal"
    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    # Blank the placeholder "@" when both industry levels are empty.
    if subject == "@":
        subject = ""
    data["subject"] = subject
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    # Companion html fulltext record keyed by the same lngid.
    html_dic = creat_base_html_dic()
    html_dic["_id"] = f"{lngid}_content.html"
    html_dic["filename"] = f"{lngid}_content.html"
    html_dic["keyid"] = lngid
    html_dic["lngid"] = lngid
    html_dic["sub_db_id"] = sub_db_id
    html_dic["raw_source_url"] = article_json["url"]
    html_dic["source_type"] = "12"
    html_dic["latest_date"] = down_date
    html_dic["is_deprecated"] = "0"
    html_dic["fulltext_type"] = "html"
    html_dic["fulltext_size"] = len(fulltext)
    html_dic["fulltext_txt"] = fulltext
    html_dic["page_cnt"] = 1
    html_dic["pub_year"] = pub_year
    save_data.append({'table': 'cqvipqyreport_fulltext_latest_new', 'data': html_dic})

    result.save_data = save_data
    # with open("test1.json", mode="w", encoding="utf-8") as f:
    #     f.write(json.dumps(save_data, ensure_ascii=False))
    return result



def cqvipqyreport_eastmoneymacresearchlist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of the EastMoney macro-research report list API.

    Page 1 additionally fans out list tasks for every remaining page; each
    list item becomes one next-stage article task whose rawid carries the
    "_A03" sub-library suffix.
    """
    result = DealModel()

    # rule = re.compile(r"http://(.*?).cma-cmc.com.cn")
    para_dicts = callmodel.para_dicts
    # print(para_dicts)
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Base fields copied onto every generated next-stage task row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    # The list response body is JSON stored under the "html" slot.
    data_json = para_dicts['data']['1_1']['html']
    data = json.loads(data_json)
    page_index = int(callmodel.sql_model.page_index)
    if page_index == 1:
        # Only the first page schedules tasks for pages 2..total_page.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.replace_it
        total_page = ceil(data["hits"] / data["size"])
        # print(total_page)
        sql_dict = callmodel.sql_model.dict()
        sql_dict = deal_sql_dict(sql_dict)
        for page in range(2, int(total_page) + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            di_model_bef.lists.append(sql_dict.copy())
            # Flush in chunks so one insert batch stays bounded.
            # NOTE(review): re-created chunk models here use replace_it,
            # whereas the sibling list callbacks use insert_ig_it — confirm
            # whether this divergence is intentional.
            if len(di_model_bef.lists) > 10000:
                result.befor_dicts.insert.append(di_model_bef)
                di_model_bef = DealInsertModel()
                di_model_bef.insert_pre = CoreSqlValue.replace_it
        result.befor_dicts.insert.append(di_model_bef)
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = data["data"]
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the next-stage tag to be this row's task_tag.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        rawid = str(li["id"]) + "_A03"
        orgName = li["orgName"]
        orgSName = li["orgSName"]
        industryName = li["industryName"]
        title = li["title"]
        author = li["researcher"].replace(",", ";")
        publishDate = li["publishDate"]
        pub_date = solve_date(publishDate)
        url = f'https://data.eastmoney.com/report/zw_macresearch.jshtml?encodeUrl={li["encodeUrl"]}'

        temp["rawid"] = rawid
        article_json["orgName"] = orgName
        article_json["orgSName"] = orgSName
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = author
        article_json["pub_date"] = pub_date
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_eastmoneymacresearcharticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article stage needs no extra list/queue work: return an empty DealModel."""
    return DealModel()




def cqvipqyreport_eastmoneymacresearcharticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL stage for EastMoney macro-research reports (sub-library A03).

    Builds one metadata row plus one html fulltext row from the parsed
    article payload and the list-stage ``article_json``, and queues both
    for saving.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    sub_db_id = "00780"
    datainfo = para_dicts["data"]["1_1"]["data"]
    title = datainfo["title"]
    pdf_url = datainfo["pdf_url"]
    fulltext = datainfo["fulltext"]
    # title_alt = datainfo["title_alt"]

    src_data = down_model["1_1"]
    # Accumulate available fulltext kinds: html body and/or the attachment
    # type derived from the pdf link's extension.
    fulltext_type = ""
    if checkExist(fulltext):
        fulltext_type += ";html"
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"
    # down_date "YYYY-MM-DD hh:mm:ss" -> "YYYYMMDD".
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    # data["num"] = article_json["num"]
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is a self-kept field: every report source shares one
    # sub_db_id, so this field (never uploaded to the warehouse) stores a
    # custom sub-library code; the same code is embedded as the rawid suffix.
    data["sub_db_judge"] = "A03"

    # data["zt_provider"] = "cmacmcjournal"
    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    # Blank the placeholder "@" when both industry levels are empty.
    if subject == "@":
        subject = ""
    data["subject"] = subject
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    # Companion html fulltext record keyed by the same lngid.
    html_dic = creat_base_html_dic()
    html_dic["_id"] = f"{lngid}_content.html"
    html_dic["filename"] = f"{lngid}_content.html"
    html_dic["keyid"] = lngid
    html_dic["lngid"] = lngid
    html_dic["sub_db_id"] = sub_db_id
    html_dic["raw_source_url"] = article_json["url"]
    html_dic["source_type"] = "12"
    html_dic["latest_date"] = down_date
    html_dic["is_deprecated"] = "0"
    html_dic["fulltext_type"] = "html"
    html_dic["fulltext_size"] = len(fulltext)
    html_dic["fulltext_txt"] = fulltext
    html_dic["page_cnt"] = 1
    html_dic["pub_year"] = pub_year
    save_data.append({'table': 'cqvipqyreport_fulltext_latest_new', 'data': html_dic})

    result.save_data = save_data
    # with open("test1.json", mode="w", encoding="utf-8") as f:
    #     f.write(json.dumps(save_data, ensure_ascii=False))
    return result




def cqvipqyreport_iresearchreportlist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """List-page callback for iResearch reports (sub_db_judge B01).

    Parses the JSON list response, emits one next-stage article task per
    report (insert-ignore for dedup), and re-queues the current list task
    with page_index+1 and an updated ``lastId`` cursor (replace-into).

    :param callmodel: framework callback context carrying the downloaded
        page (``para_dicts``), the originating task row (``sql_model``)
        and redis task metadata.
    :return: DealModel whose ``befor_dicts`` re-queues this list crawl and
        whose ``next_dicts`` holds the per-article tasks.
    """
    result = DealModel()

    # rule = re.compile(r"http://(.*?).cma-cmc.com.cn")
    para_dicts = callmodel.para_dicts
    # print(para_dicts)
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Template copied into every article task built from this page.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    # The "html" slot actually carries the raw JSON body of the list API.
    data_json = para_dicts['data']['1_1']['html']
    data = json.loads(data_json)
    page_index = int(callmodel.sql_model.page_index)
    page_index_next = page_index + 1
    # Task that re-queues this list crawl for the next page (replace-into).
    di_model_bef = DealInsertModel()
    di_model_bef.insert_pre = CoreSqlValue.replace_it
    sql_dict = callmodel.sql_model.dict()
    sql_dict = deal_sql_dict(sql_dict)  # strip framework bookkeeping columns
    list_json = sql_dict["list_json"]
    list_json_dict = json.loads(list_json)
    sql_dict["page_index"] = page_index_next

    # Article tasks derived from this page (insert-ignore for dedup).
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = data["List"]
    freport_id = ""
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the article task to the next pipeline stage.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        # "_B01" suffix marks the iResearch sub-source in the shared rawid space.
        rawid = str(li["NewsId"]) + "_B01"
        # orgName = li["orgName"]
        # orgSName = li["orgSName"]
        industryName = li["industry"]
        title = li["sTitle"]
        Author = li["Author"].replace(",", ";")
        publishDate = li["Uptime"]
        pub_date = format_date(publishDate, "%Y/%m/%d %H:%M:%S", "%Y%m%d_%H%M%S")
        # detail_url is the human-facing page; url is the JSON API fetched next.
        detail_url = f'https://www.iresearch.com.cn/Detail/report?id={li["NewsId"]}&isfree=0'
        url = f'https://www.iresearch.com.cn/api/Detail/reportM?id={li["NewsId"]}&isfree=0'
        temp["rawid"] = rawid
        article_json["orgName"] = ""
        article_json["orgSName"] = ""
        article_json["detail_url"] = detail_url
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = Author
        article_json["pub_date"] = pub_date
        pdf_url = f'https://www.iresearch.cn/include/ajax/user_ajax.ashx?work=idown&rid={li["NewsId"]}'
        article_json["pdf_url"] = pdf_url
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)
        freport_id = li["Id"]
    # Pagination cursor = Id of the LAST item on this page.
    # NOTE(review): if data["List"] is empty, lastId is reset to "" — confirm
    # that is the intended behavior for the final/empty page.
    list_json_dict["lastId"] = freport_id
    sql_dict["list_json"] = json.dumps(list_json_dict, ensure_ascii=False)
    di_model_bef.lists.append(sql_dict.copy())
    result.befor_dicts.insert.append(di_model_bef)
    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_iresearchreportarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article-download callback for iResearch reports.

    Nothing is extracted at this stage — all parsing happens in the ETL
    callback — so an empty DealModel is returned.
    """
    return DealModel()



# xj decided a single article-summary record from this JSON is not enough to
# count as an html fulltext, so html fulltext is disabled for this source for now.
def cqvipqyreport_iresearchreportarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for iResearch report detail JSON (sub_db_judge B01).

    Merges the detail API response (``para_dicts``) with the article_json
    saved on the task row into one ``cqvipqyreport_latest_new`` record.
    No html-fulltext record is produced (see module note above).

    :param callmodel: framework context with ``para_dicts`` (parsed detail
        JSON), ``down_model`` (download metadata) and ``sql_model``.
    :return: EtlDealModel whose ``save_data`` holds the single record.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    # Shared sub_db_id for all report sources; see sub_db_judge below.
    sub_db_id = "00780"
    datainfo = para_dicts["data"]["1_1"]["List"][0]
    title = datainfo["Title"]
    pdf_url = article_json["pdf_url"]
    cover_url = datainfo["Topic"]
    keywords = datainfo["keywords"]
    abstract = check_none(datainfo.get("Content"))
    # NOTE(review): only the opening "<p>" tags are stripped, not "</p>" or
    # other markup — confirm downstream tolerates residual tags.
    abstract = abstract.replace("<p>", "")
    # fulltext = datainfo["fulltext"]
    # title_alt = datainfo["title_alt"]

    src_data = down_model["1_1"]
    # fulltext_type accumulates ";"-separated format tags (pdf/doc/pic/...).
    fulltext_type = ""
    # if checkExist(fulltext):
    #     fulltext_type += ";html"
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"
    if checkExist(cover_url):
        fulltext_type += ";pic"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    # data["num"] = article_json["num"]
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is an internal-only field: every report source shares one
    # sub_db_id, so this private field (not shipped to the warehouse) stores a
    # self-assigned code for the concrete source library; the same code is
    # embedded as a suffix of the rawid.
    data["sub_db_judge"] = "B01"

    # data["zt_provider"] = "cmacmcjournal"
    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["detail_url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["abstract"] = abstract
    data["keyword"] = keywords
    # subject = "<first-level industry>@<second-level industry>".
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    if subject == "@":
        subject = ""
    data["subject"] = subject.strip("@")
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url
    data["cover_url"] = cover_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    # html_dic = creat_base_html_dic()
    # html_dic["_id"] = f"{lngid}_content.html"
    # html_dic["filename"] = f"{lngid}_content.html"
    # html_dic["keyid"] = lngid
    # html_dic["lngid"] = lngid
    # html_dic["sub_db_id"] = sub_db_id
    # html_dic["raw_source_url"] = article_json["url"]
    # html_dic["source_type"] = "12"
    # html_dic["latest_date"] = down_date
    # html_dic["is_deprecated"] = "0"
    # html_dic["fulltext_type"] = "html"
    # html_dic["fulltext_size"] = len(fulltext)
    # html_dic["fulltext_txt"] = fulltext
    # html_dic["page_cnt"] = 1
    # html_dic["pub_year"] = pub_year
    # save_data.append({'table': 'cqvipqyreport_fulltext_latest_new', 'data': html_dic})

    result.save_data = save_data
    # with open("test1.json", mode="w", encoding="utf-8") as f:
    #     f.write(json.dumps(save_data, ensure_ascii=False))
    # print(result)
    return result



def cqvipqyreport_wkaskcilist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """List-page callback for wk.askci.com books/reports (sub_db_judge C01).

    On page 1, fans out list tasks for pages 2..ceil(count/10)
    (replace-into, flushed in chunks of 10000 rows).  Every book entry on
    the current page becomes a next-stage article task (insert-ignore).

    Fix vs. previous revision: the DealInsertModel/sql_dict pair that was
    built unconditionally before the ``page_index == 1`` branch was dead
    code (both were rebuilt inside the branch) — removed.

    :param callmodel: framework context with the parsed list response in
        ``para_dicts``, the originating task row and redis task metadata.
    :return: DealModel with page fan-out in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()

    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Template copied into every article task built from this page.
    info_dicts = {"task_name": sql_model.task_name,
                  "task_tag": sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    data = para_dicts['data']['1_1']

    page_index = int(sql_model.page_index)
    sql_dict = deal_sql_dict(sql_model.dict())  # strip framework bookkeeping columns
    list_json_dict = json.loads(sql_dict["list_json"])
    if page_index == 1:
        # Fan out the remaining list pages; the site serves 10 items/page.
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.replace_it
        total_page = ceil(int(data["count"]) / 10)
        for page in range(2, int(total_page) + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(list_json_dict, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
            # Flush in chunks so a single insert never exceeds 10000 rows.
            if len(di_model_bef.lists) > 10000:
                result.befor_dicts.insert.append(di_model_bef)
                di_model_bef = DealInsertModel()
                # NOTE(review): follow-up chunks use insert-ignore while the
                # first chunk uses replace — preserved from the original.
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        result.befor_dicts.insert.append(di_model_bef)

    # Article tasks derived from this page (insert-ignore for dedup).
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = data["data"]
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the article task to the next pipeline stage.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        # "_C01" suffix marks the wk.askci sub-source in the shared rawid space.
        rawid = str(li["BookId"]) + "_C01"
        orgName = li["BookPublisher"]
        industryName = ""
        title = li["BookName"]
        keywords = li["StrBookTagName"].replace(",", ";")
        publishDate = li["StrBookPublishDate"]
        if publishDate == "":
            pub_date = ""
            pub_year = ""
        else:
            pub_date = format_date(publishDate, "%Y-%m-%d", "%Y%m%d_%H%M%S")
            pub_year = publishDate[0:4]
        base_url = "https://wk.askci.com/"
        url = make_url(li["ReadUrl"], base_url)
        temp["rawid"] = rawid
        article_json["orgName"] = orgName
        article_json["orgSName"] = ""
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = ""
        article_json["pub_date"] = pub_date
        article_json["pub_year"] = pub_year
        article_json["keywords"] = keywords
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)

    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_wkaskciarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article-download callback for wk.askci.com.

    Nothing is extracted at this stage — all parsing happens in the ETL
    callback — so an empty DealModel is returned.
    """
    return DealModel()



def cqvipqyreport_wkaskciarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for wk.askci.com detail pages (sub_db_judge C01).

    Builds one ``cqvipqyreport_latest_new`` record from the downloaded
    detail HTML plus the article_json carried on the task row.  No
    html-fulltext record is produced for this source.

    Fixes vs. previous revision:
      * pdf regex is now a raw string (``\\.`` inside a plain string is an
        invalid escape sequence and a SyntaxWarning on modern Python);
      * no IndexError when the iframe ``src`` exists but contains no
        ``pdfpath=...`` parameter — pdf_url falls back to "";
      * stray debug ``print(lngid)`` removed.

    :param callmodel: framework context with the raw detail HTML in
        ``para_dicts``, download metadata and the task row.
    :return: EtlDealModel whose ``save_data`` holds the single record.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    # Shared sub_db_id for all report sources; see sub_db_judge below.
    sub_db_id = "00780"
    html = para_dicts["data"]["1_1"]["html"]
    sel = Selector(html, "html")
    details = sel.css('div[class="show_detail"]')
    if details:
        # Normal layout: title plus an embedded pdf.js viewer whose src
        # carries a percent-encoded "pdfpath=<url>.pdf" query parameter.
        title = details.css("div[class='show_title'] a::text").get()
        pdf_url_before = sel.css("div[id='insert-pdfjs'] iframe::attr(src)").get()
        if pdf_url_before:
            match = re.search(r"pdfpath=(.*?\.pdf)", pdf_url_before)
            pdf_url = unquote(match.group(1)) if match else ""
        else:
            pdf_url = ""
    else:
        # Alternate layout: the pdf link requires login, so skip it.
        details = sel.css('div[class="margin_box"]')
        title = details.css("h1::text").get()
        pdf_url = ""

    cover_url = ""
    keywords = article_json["keywords"]
    abstract = ""

    src_data = down_model["1_1"]
    # fulltext_type accumulates ";"-separated format tags (pdf/doc/...).
    fulltext_type = ""
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    # NOTE(review): title may be None if neither selector matches —
    # .replace would then raise; confirm the page always has a title node.
    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is an internal-only field: every report source shares one
    # sub_db_id, so this private field (not shipped to the warehouse) stores a
    # self-assigned code for the concrete source library; the same code is
    # embedded as a suffix of the rawid.
    data["sub_db_judge"] = "C01"

    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["abstract"] = abstract
    data["keyword"] = keywords
    # subject = "<first-level industry>@<second-level industry>".
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    if subject == "@":
        subject = ""
    data["subject"] = subject.strip("@")
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url
    data["cover_url"] = cover_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    result.save_data = save_data
    return result


def cqvipqyreport_baogaotinglist_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """List-page callback for www.baogaoting.com (sub_db_judge C02).

    On page 1, reads the "共N页" pager text and fans out list tasks for
    pages 2..N (replace-into, flushed in chunks of 10000 rows).  Every
    entry on the current page becomes a next-stage article task
    (insert-ignore).

    Fixes vs. previous revision:
      * total page count is extracted with ``re.search`` instead of
        ``re.sub`` (with re.sub, any text surrounding "共N页" leaked into
        the result and made ``int()`` fail with a cryptic ValueError);
      * the bare ``raise Exception`` now carries a diagnostic message;
      * removed the dead DealInsertModel/sql_dict pair built before the
        ``page_index == 1`` branch (both were rebuilt inside it).

    :param callmodel: framework context with the list-page HTML in
        ``para_dicts``, the originating task row and redis task metadata.
    :return: DealModel with page fan-out in ``befor_dicts`` and article
        tasks in ``next_dicts``.
    """
    result = DealModel()

    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    # Template copied into every article task built from this page.
    info_dicts = {"task_name": sql_model.task_name,
                  "task_tag": sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next,
                  "sub_db_id": sql_model.sub_db_id
                  }
    data = para_dicts['data']['1_1']['html']
    page_index = int(sql_model.page_index)
    sql_dict = deal_sql_dict(sql_model.dict())  # strip framework bookkeeping columns
    list_json_dict = json.loads(sql_dict["list_json"])
    sel = Selector(data, "html")
    if page_index == 1:
        # Fan out the remaining list pages (replace-into).
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.replace_it

        total_pages_str = sel.css('span[class="fontC02"]::text').get()
        match = re.search("共(.*?)页", total_pages_str) if total_pages_str else None
        if match is None:
            raise Exception(f"baogaoting pager text not recognised: {total_pages_str!r}")
        total_page = match.group(1)

        for page in range(2, int(total_page) + 1):
            sql_dict["page"] = total_page
            sql_dict["page_index"] = page
            sql_dict["list_json"] = json.dumps(list_json_dict, ensure_ascii=False)
            di_model_bef.lists.append(sql_dict.copy())
            # Flush in chunks so a single insert never exceeds 10000 rows.
            if len(di_model_bef.lists) > 10000:
                result.befor_dicts.insert.append(di_model_bef)
                di_model_bef = DealInsertModel()
                # NOTE(review): follow-up chunks use insert-ignore while the
                # first chunk uses replace — preserved from the original.
                di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        result.befor_dicts.insert.append(di_model_bef)

    # Article tasks derived from this page (insert-ignore for dedup).
    di_model_next = DealInsertModel()
    di_model_next.insert_pre = CoreSqlValue.insert_ig_it
    li_list = sel.css('section[class="content"] div[class="row acenter"] ul')
    for li in li_list:
        temp = info_dicts.copy()
        article_json = dict()
        # Promote the article task to the next pipeline stage.
        temp["task_tag"] = temp["task_tag_next"]
        del temp["task_tag_next"]
        href = li.css('li:first-of-type a::attr(href)').get()
        # "_C02" suffix marks the baogaoting sub-source in the shared rawid space.
        rawid = href.split("/")[-1] + "_C02"
        orgName = ""
        industryName = ""
        title = li.css('li:first-of-type a p::text').get()
        # Tags are rendered like "#keyword" — strip the marker and join with ";".
        keywords_list = li.css('li:last-of-type .detail_label::text').getall()
        new_ke_list = list()
        for one in keywords_list:
            one = one.replace("#", "").strip()
            new_ke_list.append(one)
        keywords = ";".join(new_ke_list)
        publishDate = li.css('li:last-of-type span[class="fontC05 fontS13"]::text').get()
        # NOTE(review): publishDate may be None if the selector misses;
        # format_date / slicing would then raise — confirm page structure.
        pub_date = format_date(publishDate, "%Y-%m-%d", "%Y%m%d_%H%M%S")
        pub_year = publishDate[0:4]
        base_url = "https://www.baogaoting.com/infolecture/list"
        url = make_url(href, base_url)
        temp["rawid"] = rawid
        article_json["orgName"] = orgName
        article_json["orgSName"] = ""
        article_json["url"] = url
        article_json["title"] = title
        article_json["industryName"] = industryName
        article_json["author"] = ""
        article_json["pub_date"] = pub_date
        article_json["pub_year"] = pub_year
        article_json["keywords"] = keywords
        temp["article_json"] = json.dumps(article_json, ensure_ascii=False)
        di_model_next.lists.append(temp)

    result.next_dicts.insert.append(di_model_next)

    return result


def cqvipqyreport_baogaotingarticle_callback(callmodel: CallBackModel[OtherArticleModel]) -> DealModel:
    """Article-download callback for www.baogaoting.com.

    Nothing is extracted at this stage — all parsing happens in the ETL
    callback — so an empty DealModel is returned.
    """
    return DealModel()



def cqvipqyreport_baogaotingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for www.baogaoting.com detail pages (sub_db_judge C02).

    Builds one ``cqvipqyreport_latest_new`` record from the downloaded
    detail HTML plus the article_json carried on the task row.  No
    html-fulltext record is produced for this source.

    Fixes vs. previous revision:
      * pdf regex is now a raw string (``\\.`` inside a plain string is an
        invalid escape sequence and a SyntaxWarning on modern Python);
      * no IndexError when the viewer iframe ``src`` exists but contains
        no ``file=...`` parameter — pdf_url falls back to "";
      * stray debug ``print(lngid)`` removed.

    :param callmodel: framework context with the raw detail HTML in
        ``para_dicts``, download metadata and the task row.
    :return: EtlDealModel whose ``save_data`` holds the single record.
    """
    result = EtlDealModel()
    save_data = list()
    para_dicts = callmodel.para_dicts

    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    article_json = json.loads(sql_model["article_json"])

    data = create_base_report_dic()
    # Shared sub_db_id for all report sources; see sub_db_judge below.
    sub_db_id = "00780"
    html = para_dicts["data"]["1_1"]["html"]
    sel = Selector(html, "html")
    # Title comes from the list stage; the page viewer iframe src carries
    # a "file=<url>.pdf" query parameter with the pdf location.
    title = article_json["title"]
    pdf_url_before = sel.css("div[id='detail'] iframe::attr(src)").get()
    if pdf_url_before:
        match = re.search(r"file=(.*?\.pdf)", pdf_url_before)
        pdf_url = match.group(1) if match else ""
    else:
        pdf_url = ""

    cover_url = ""
    keywords = article_json["keywords"]
    abstract = ""

    src_data = down_model["1_1"]
    # fulltext_type accumulates ";"-separated format tags (pdf/doc/...).
    fulltext_type = ""
    if checkExist(pdf_url):
        if pdf_url.startswith("http"):
            ext_name_raw = pdf_url.split(".")[-1]
            ext_name = ext_name_format(ext_name_raw)
            fulltext_type += f";{ext_name}"

    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["rawid"] = sql_model["rawid"]
    rawid = data["rawid"]
    data["rawid_mysql"] = sql_model["rawid"]
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)

    data["lngid"] = lngid
    data["keyid"] = lngid

    data["title"] = title.replace("&nbsp", "").replace("#br#", "").strip()
    data["source_type"] = "12"
    data["product"] = "VIPQY"
    data["sub_db"] = "REPORT"
    data["provider"] = "VIP"
    # sub_db_judge is an internal-only field: every report source shares one
    # sub_db_id, so this private field (not shipped to the warehouse) stores a
    # self-assigned code for the concrete source library; the same code is
    # embedded as a suffix of the rawid.
    data["sub_db_judge"] = "C02"

    data["sub_db_id"] = sub_db_id
    data["is_deprecated"] = "0"
    data["provider_url"] = article_json["url"]
    author = article_json["author"]
    data["author"] = author
    data["author_1st"] = author.split(";")[0].replace("[1]", "").replace("[2]", "").strip()
    data["organ"] = check_none(article_json.get("orgSName"))
    pub_date = check_none(article_json.get("pub_date")).replace("-", "")
    data["pub_date"] = pub_date
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["abstract"] = abstract
    data["keyword"] = keywords
    # subject = "<first-level industry>@<second-level industry>".
    industry_second = article_json.get("industryName", "")
    industry_first = solve_indust(industry_second)
    subject = f"{industry_first}@{industry_second}"
    if subject == "@":
        subject = ""
    data["subject"] = subject.strip("@")
    data["sub_db_class_name"] = industry_second
    data["fulltext_type"] = fulltext_type.lstrip(";")
    pub_year = pub_date[0:4]
    data["pub_year"] = pub_year
    batch = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    data["batch"] = batch
    data["pdf_url"] = pdf_url
    data["cover_url"] = cover_url

    save_data.append({'table': 'cqvipqyreport_latest_new', 'data': data})

    result.save_data = save_data

    return result

