import copy
import datetime
import json
import time
import math
import re
import traceback
import urllib
from urllib import parse

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.baseurl import BaseUrl
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, JournalListModel, JournalIssueModel, \
    JournalArticleModel, DealInsertModel, JournalHomeModel, DealUpdateModel, OperatorSqlModel, DealItemModel, \
    EtlDealModel, JournalVolumeModel


# Public callback entry points for the MedJournals (CMA) crawl/ETL pipeline.
# The crawler platform dispatches to these by name (task tag + stage), so the
# names must stay in sync with the task configuration.
__all__ = [
    "medjournals_medjournalshome_callback",
    "medjournals_medjournalslist_callback",
    "medjournals_medjournalsvolumes_callback",
    "medjournals_medjournalsvolume_callback",
    "medjournals_medjournalsissue_callback",
    "medjournals_medjournalsOaarticle_callback",
    "medjournals_medjournalsarticle_callback",
    "medjournals_medjournalsOaarticle_etl_callback",
    "medjournals_medjournalsarticle_etl_callback",
]


def medjournals_medjournalshome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of the journal-home listing API.

    Produces two kinds of follow-up work:
      * ``result.befor_dicts.insert`` -- on page 1 only, one task row per
        listing page so every remaining page of the journal index is fetched.
      * ``result.next_dicts`` -- one 'medjournalslist' task per journal found
        on this page, plus an UPDATE re-activating any existing matching row.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Fields copied into every task row generated below.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}

    if "1_1" in para_dicts["data"]:
        data = callmodel.para_dicts['data']['1_1']['data']

        sql_dict = callmodel.sql_model.dict()
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        # Drop bookkeeping columns so the remainder can be re-inserted as a
        # fresh task row.
        sql_dict.pop("id")
        sql_dict.pop("update_time")
        sql_dict.pop("create_time")
        sql_dict.pop("null_dicts")
        sql_dict.pop("err_msg")
        sql_dict.pop("other_dicts")
        sql_dict.pop("state")
        sql_dict.pop("failcount")
        if callmodel.sql_model.page_index == '1':
            # Fan out the remaining listing pages. Assumes the API pages 10
            # items per page -- TODO confirm against the endpoint.
            total_page = math.ceil(data['count']/10)
            for page in range(1, total_page + 1):
                # NOTE(review): "page" stores the page *count* on every row
                # while "page_index" carries the page number -- presumably
                # intentional; verify against the table schema.
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        # Two titles live at https://journals.lww.com/bls/pages/default.aspx
        # and https://journals.lww.com/cd/pages/default.aspx.
        # Journals hosted on any of these external platforms are skipped here.
        host_list = ['cvjc.org', 'chinagp', 'cmcr.yiigle', 'journals.elsevier', 'journals.lww', 'sciencedirect', 'radmp.org', 'rheumatology-autoimmunity', 'chinamedicinej', 'onlinelibrary.wiley']
        for item in data['items']:
            journal_json = dict()
            temp = info_dicts.copy()
            # url = item['ysjOfficialUrl']
            url = item['journalWebUrl']
            if any(host in url for host in host_list):
                continue
            temp["task_tag"] = 'medjournalslist'
            # Re-key selected API fields into the project's ysj* vocabulary.
            journal_json["url"] = url
            journal_json["ysjOa"] = item.get('journalOaModeCode', '')
            journal_json["ysjCnAllName"] = item.get('journalName', '')
            # journal_json["ysjCnShortName"] = item.get('ysjCnShortName', '')
            journal_json["ysjEnAllName"] = item.get('journalNameEn', '')
            # journal_json["ysjEnShortName"] = item.get('ysjEnShortName', '')
            journal_json["ysjIssnNumber"] = item.get('journalISSN', '')
            journal_json["ysjCnNumber"] = item.get('journalCN', '')
            journal_json["ysjChiefEditor"] = item.get('journalEditorInChief', '')
            journal_json["ysjPrincipal"] = item.get('journalDirector', '')
            journal_json["ysjEmail"] = item.get('journalEmail', '')
            journal_json["ysjHost"] = item.get('journalSponsor', '')
            journal_json["ysjFoundedDate"] = item.get('journalStartYear', '')
            journal_json["ysjAddress"] = item.get('journalAddress', '')
            # journal_json["ysjLanguage"] = item.get('ysjLanguage', '')
            journal_json["ysjPublicationPeriod"] = item.get('journalFREQ', '')
            journal_json["ysjNewestPrice"] = item.get('journalPrice', '')

            temp["sub_db_id"] = ''
            del temp["task_tag_next"]
            # The journal's rawid is the hostname of its site URL.
            part = url.split('//')[-1].split('/')[0].strip()
            temp["journal_rawid"] = part
            temp["journal_json"] = json.dumps(journal_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

            du_model = DealUpdateModel()

            # du_model.update_no_placeholder.update({"page": max_page})
            # Re-activate any previously inserted row for this journal/task.
            du_model.update.update({
                "is_active": 1})
            du_model.where.update({"journal_rawid": part,
                                   "task_tag": temp["task_tag"],
                                   "task_name": callmodel.sql_model.task_name
                                   # "is_active": "0"
                                   })
            result.next_dicts.update_list.append(du_model)
        result.next_dicts.insert.append(di_model_next)

    return result


def medjournals_medjournalslist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Turn every year link on a journal's list page into a next-stage task row."""
    result = DealModel()
    # Read task_info before the guard so a missing redis entry fails loudly,
    # matching the platform's other callbacks.
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    if "1_1" not in callmodel.para_dicts["data"]:
        return result

    selector = Selector(text=callmodel.para_dicts['data']['1_1']['html'])

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for href in selector.xpath('//ul[@id="year_list"]/li/a/@href').extract():
        # The year is the value of the last query-string parameter.
        row = {
            "task_name": callmodel.sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": '',
            "journal_rawid": callmodel.sql_model.journal_rawid,
            "pub_year": href.split('=')[-1],
            "num": 0,
            "volume_json": callmodel.sql_model.journal_json,
        }
        insert_model.lists.append(row)
    result.next_dicts.insert.append(insert_model)

    return result


def medjournals_medjournalsvolumes_callback(callmodel: CallBackModel[JournalVolumeModel]) -> DealModel:
    """Re-queue one 'medjournalsvolume' task for every year tab on the volumes page."""
    result = DealModel()
    # Built for parity with the sibling callbacks; also validates that the
    # "1_1" redis entry and its task_tag_next exist.
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}

    if "1_1" in callmodel.para_dicts["data"]:
        selector = Selector(text=callmodel.para_dicts['data']['1_1']['html'])

        # Strip bookkeeping columns so the row can be re-inserted as-is.
        row_template = callmodel.sql_model.dict()
        for bookkeeping_key in ("id", "update_time", "create_time", "null_dicts",
                                "err_msg", "other_dicts", "state", "failcount",
                                "page"):
            row_template.pop(bookkeeping_key)

        insert_model = DealInsertModel()
        insert_model.insert_pre = CoreSqlValue.insert_ig_it
        for year_text in selector.xpath('//ul[@id="year_style"]/li/a/text()').extract():
            row_template["task_tag"] = 'medjournalsvolume'
            row_template["journal_rawid"] = callmodel.sql_model.journal_rawid
            row_template["pub_year"] = year_text
            row_template["num"] = 0
            # Copy: the template dict is reused across iterations.
            insert_model.lists.append(row_template.copy())
        result.befor_dicts.insert.append(insert_model)

    return result


def medjournals_medjournalsvolume_callback(callmodel: CallBackModel[JournalVolumeModel]) -> DealModel:
    """Expand the volume payload into one next-stage issue task per item."""
    result = DealModel()
    # Read before the guard so a missing redis entry fails loudly.
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    if "1_1" not in callmodel.para_dicts["data"]:
        return result

    items = callmodel.para_dicts['data']['1_1']
    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for item in items:
        # Parse afresh per item: each row must carry its own mutable copy of
        # the accumulated journal JSON (hoisting would alias the dict).
        issue_json = json.loads(callmodel.sql_model.volume_json)
        issue_json["fmUrl"] = item.get('fmUrl', '')
        issue_json["mlUrl"] = item.get('mlUrl', '')
        issue_json["issue_url"] = item.get('url', '')
        issue_json["issueYear"] = item.get('issueYear', '')
        issue_json["issueVol"] = item.get('issueVol', '')
        issue_json["issueIssue"] = item.get('issueIssue', '')
        issue_json["channelName"] = item.get('channelName', '')
        issue_json["issuePubDate"] = item.get('issuePubDate', '')
        row = {
            "task_name": callmodel.sql_model.task_name,
            "task_tag": task_info.task_tag_next,
            "sub_db_id": '',
            "journal_rawid": callmodel.sql_model.journal_rawid,
            "pub_year": item.get('issueYear', ''),
            "num": item.get('channelPath', ''),
            "page_index": 0,
            "issue_json": json.dumps(issue_json, ensure_ascii=False),
        }
        insert_model.lists.append(row)
    result.next_dicts.insert.append(insert_model)

    return result



# def format_organ(organ_list):
#     # At WC's request, normalize the affiliation (organ) field. Unknown formats exist, so the regex will inevitably mis-handle some inputs (all of these issues were discussed with WC).
#     new_data_list = list()
#     rule = re.compile("](\d{6})　(.*?)，")
#     rule1 = re.compile("](\d{6})")
#     for organ_alt in organ_list:
#         organ_data = rule.findall(organ_alt)
#         if organ_data:
#             organ_tup = organ_data[0]
#             rep_str = f"{organ_tup[0]}　{organ_tup[1]}，"
#             new_organ = organ_alt.replace(rep_str, "").replace("　", "").replace("，", ",")
#             organ = new_organ + f",{organ_tup[1]}{organ_tup[0]}"
#             new_data_list.append(organ)
#         else:
#             organ_data1 = rule1.findall(organ_alt)
#             if organ_data1:
#                 organ_tup = organ_data1[0]
#                 new_organ = organ_alt.replace(organ_tup, "").replace("，", ",")
#                 organ = new_organ + f",{organ_tup}"
#                 new_data_list.append(organ)
#             else:
#                 new_data_list.append(organ_alt.replace("　", "").replace("，", ","))
#     organs = ";".join(new_data_list)
#     return organs



def medjournals_medjournalsissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse an issue's table-of-contents page into one task row per article.

    Open-access journals (``ysjOa == 0``) are routed to the
    'medjournalsOaarticle' stage; all others go to 'medjournalsarticle'.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = callmodel.para_dicts['data']['1_1']['html']
        res = Selector(text=html)

        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        div_list = res.xpath('//div[contains(@class,"current_content_article_body")]')
        for div in div_list:
            temp = info_dicts.copy()
            del temp["task_tag_next"]
            # Fresh parse per article so each row carries its own JSON copy.
            article_json = json.loads(callmodel.sql_model.issue_json)
            if article_json['ysjOa'] == 0:
                temp["task_tag"] = 'medjournalsOaarticle'
            else:
                temp["task_tag"] = 'medjournalsarticle'
            url = div.xpath('p[@class="Current_Title"]/a/@href').extract_first()
            title = div.xpath('p[@class="Current_Title"]/a/@title').extract_first()
            author = div.xpath('p[@class="current_author"]/text()').extract_first()
            current_tip = ' '.join(div.xpath('p[@class="current_tip"]//text()').extract())
            down_cnt = ' '.join(div.xpath('ul/li[2]/span[2]/text()').extract())
            pv_cnt = ' '.join(div.xpath('ul/li[1]/span[2]/text()').extract())
            # BUGFIX: escape the dot. The old pattern '(.*?).htm' let '.' match
            # any character, truncating filenames that contain "htm" (e.g.
            # 'abhtm.htm' yielded 'a' instead of 'abhtm').
            rawid = re.findall(r'(.*?)\.htm', url.split('/')[-1])[0]
            temp["rawid"] = rawid
            temp["sub_db_id"] = '00523'
            article_json["article_url"] = url
            article_json["title"] = title
            article_json["author"] = author
            article_json["current_tip"] = current_tip
            article_json["down_cnt"] = down_cnt
            article_json["pv_cnt"] = pv_cnt
            temp["article_info_json"] = json.dumps(article_json, ensure_ascii=False)
            di_model_next.lists.append(temp)

        result.next_dicts.insert.append(di_model_next)

    return result


def medjournals_medjournalsOaarticle_callback(callmodel: CallBackModel[JournalArticleModel]) -> DealModel:
    """No-op crawl stage for OA articles: all parsing is done in the ETL
    callback, so simply return an empty DealModel."""
    return DealModel()


def medjournals_medjournalsOaarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one open-access article page into save-ready records.

    Combines the fetched HTML with the accumulated ``article_info_json``
    metadata to build up to three rows:
      * ``journal_latest``          -- the article's bibliographic record
      * ``journal_fulltext_latest`` -- the HTML full text (only when non-empty)
      * ``journal_ref_latest``      -- the article's reference list

    Fixes over the previous revision:
      * ``fund`` trailing-';' handling no longer deletes *all* separators
      * regex patterns are raw strings (no invalid escape sequences)
      * removed a leftover debug ``print`` and dead/duplicate assignments
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    # ---- journal_latest: bibliographic record -------------------------------
    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00523'
    product = 'MEDJOURNALS'
    provider = 'CMA'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'cmamedjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    # Missing language defaults to Chinese; any explicit non-Chinese value
    # is mapped to EN.
    language = article_json.get("ysjLanguage", "")
    data['language'] = 'ZH' if language in ('中文', '') else 'EN'
    data['title'] = ' '.join(res.xpath('//div[@class="main_title"]/text()').extract()).strip()
    data['title_alt'] = ' '.join(res.xpath('//div[@class="en_tit"]/text()').extract()).strip()
    data['provider_url'] = article_json['article_url']
    # Download-count snapshot is date-stamped: "<count>@<yyyymmdd>".
    # (Previously assigned twice; the first raw assignment was a dead store.)
    data['down_cnt'] = f"{article_json['down_cnt']}@{down_date_str[:8]}"
    data['issn'] = article_json['ysjIssnNumber']
    data['cnno'] = article_json['ysjCnNumber']
    pub_year = article_json['issueYear']
    data['pub_year'] = pub_year
    data['pub_date'] = article_json['issuePubDate'].replace('-', '')
    data['pub_place'] = article_json['ysjAddress']
    data['vol'] = article_json['issueVol']
    data['num'] = article_json['issueIssue']
    data['journal_raw_id'] = article_json['url'].split('//')[-1].split('/')[0]
    data['journal_name'] = article_json['ysjCnAllName']
    data['journal_name_alt'] = article_json['ysjEnAllName']
    data['doi'] = article_json['current_tip'].split('DOI:')[-1] if 'DOI:' in article_json['current_tip'] else ''
    # Page info follows "(issue):" in current_tip, e.g. "(3):123-130 ".
    # NOTE(review): raises IndexError when the pattern is absent -- the task is
    # presumably then marked failed upstream; confirm before changing.
    page_info = re.findall(r'\(.*?\):(.*?) ', article_json['current_tip'].replace('\n', ''))[0]
    data['page_info'] = page_info
    if ',' in page_info:
        # Everything after the first comma is a discontinuous "jump" page list.
        data['jump_page'] = page_info.split(',', 1)[-1].replace(',', ';')
        page_info = page_info.split(',', 1)[0]
    data['begin_page'] = page_info.split('-')[0]
    data['end_page'] = page_info.split('-')[-1]
    data['is_oa'] = '1'
    # Chinese contributor block: author rows alternate with optional
    # affiliation rows whose markup contains 'aff"'.
    author_info_div = res.xpath('//div[contains(text(),"贡献者信息") or contains(text(),"Contributor information")]/following::div[1]/div')
    author_list = list()
    organ_dict = dict()
    for index in range(0, len(author_info_div)):
        if 'aff"' in author_info_div[index].extract():
            continue
        author_info = ''.join(author_info_div[index].xpath('span/text()').extract()).strip()
        if index + 1 >= len(author_info_div) or 'aff' not in author_info_div[index+1].extract():
            organ = ''
        else:
            organ = ''.join(author_info_div[index+1].xpath('p/text()').extract()).strip()
        if organ and organ not in organ_dict:
            organ_dict[organ] = len(organ_dict) + 1
        # Rendered as "name[affiliation-number]".
        author = f"{author_info}[{organ_dict[organ] if organ else ''}]"
        author_list.append(author)
    organ_list = list()
    for k, v in organ_dict.items():
        organ_list.append(f"[{v}]{k}")
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_1st'] = author_list[0].replace('[1]', '').replace('[]', '') if author_list else ''
    organ_data = ';'.join(organ_list)
    data['organ'] = organ_data.replace("　", "")
    # English contributor block (same alternating structure, 'class="aff"').
    author_alt_div = res.xpath('//span[contains(text(),"Contributor Information")]/ancestor::div[@class="en_abs"]/div')
    author_alt_list = list()
    author_raw_list = list()
    organ_alt_dict = dict()
    for index in range(1, len(author_alt_div)):
        if 'class="aff"' in author_alt_div[index].extract():
            continue
        author_alt_info = ''.join(author_alt_div[index].xpath('text()').extract()).strip()
        if index + 1 >= len(author_alt_div) or 'class="aff"' not in author_alt_div[index+1].extract():
            organ_alt = ''
        else:
            organ_alt = ''.join(author_alt_div[index + 1].xpath('.//text()').extract()).strip()
        if organ_alt and organ_alt not in organ_alt_dict:
            organ_alt_dict[organ_alt] = len(organ_alt_dict) + 1
        author_alt = f"{author_alt_info}[{organ_alt_dict[organ_alt] if organ_alt else ''}]"
        author_raw = f"{author_alt_info}{organ_alt_dict[organ_alt] if organ_alt else ''}"
        author_alt_list.append(author_alt)
        author_raw_list.append(author_raw)
    organ_alt_list = list()
    for k, v in organ_alt_dict.items():
        organ_alt_list.append(f"{v}. {k}")
    data['author_alt'] = ';'.join(author_alt_list).replace('[]', '')
    data['author_raw'] = f"{','.join(author_raw_list)} ({';'.join(organ_alt_list)})".replace(" ()", '').replace('[]', '')
    # Corresponding authors; an Email row may appear one or two divs later.
    corr_author_div = res.xpath('//div[contains(text(),"通信作者") or contains(text(),"Corresponding author")]/following::div[1]/div')
    corr_author_list = list()
    email_list = list()
    for index in range(0, len(corr_author_div)):
        if 'aff"' in corr_author_div[index].extract():
            continue
        corr_author = ''.join(corr_author_div[index].xpath('span/text()').extract())
        corr_author_list.append(corr_author)
        if (index + 1) < len(corr_author_div) and 'Email' in corr_author_div[index + 1].extract():
            email = corr_author_div[index + 1].xpath('div/a/text()').extract_first()
            email_list.append(f"{email}:{corr_author}")
        if (index + 2) < len(corr_author_div) and 'Email' in corr_author_div[index + 2].extract():
            email = corr_author_div[index + 2].xpath('div/a/text()').extract_first()
            email_list.append(f"{email}:{corr_author}")
    data['corr_author'] = ';'.join(corr_author_list)
    data['email'] = ';'.join(email_list)
    keyword = ';'.join(res.xpath('//div[contains(text(),"关键词") or contains(text(),"Keywords")]/following::div[1]/a/text()').extract())
    data['keyword'] = keyword
    keyword_alt = ';'.join(res.xpath('//span[contains(text(),"Key words: ")]/following::p[1]/text()').extract()).strip()
    data['keyword_alt'] = keyword_alt
    abstract = ' '.join(res.xpath('//div[@class="article_abstract_mid"]/div//text()').extract()).strip()
    data['abstract'] = abstract
    abstract_alt = ' '.join(res.xpath('//div[@class="en_abs"][1]//text()').extract()).strip()
    data['abstract_alt'] = abstract_alt
    # Dates appear as "<label>：<value>"; take everything after the full-width colon.
    pub_date = ' '.join(res.xpath('//div[contains(text(),"Published Date") or contains(text(),"出版日期")]/text()').extract())
    data['pub_date'] = pub_date.split('：')[-1]
    recv_date = ' '.join(res.xpath('//div[contains(text(),"Received Date") or contains(text(),"收稿日期")]/text()').extract())
    data['recv_date'] = recv_date.split('：')[-1]
    accept_date = ' '.join(res.xpath('//div[contains(text(),"Accepted Date") or contains(text(),"接受日期")]/text()').extract())
    data['accept_date'] = accept_date.split('：')[-1]
    fund_list = res.xpath('//div[contains(text(),"Funds") or contains(text(),"基金项目")]/following::div[1]/div/p/text()').extract()
    fund = ';'.join([i.strip() for i in fund_list])
    # BUGFIX: strip only the *trailing* separator. The old
    # ``fund.replace(';', '')`` removed every ';' in the string whenever it
    # ended with one, destroying the separators between funds.
    data['fund'] = fund.rstrip(';')
    data['column_info'] = ''.join(res.xpath('//div[@class="article_attr"]/text()').extract()).strip()
    save_data.append({'table': 'journal_latest', 'data': data})

    # ---- journal_fulltext_latest: HTML full text ----------------------------
    full_text_data = dict()
    full_text_data['lngid'] = lngid
    full_text_data['keyid'] = lngid
    full_text_data['sub_db_id'] = sub_db_id
    full_text_data['source_type'] = '3'
    full_text_data['latest_date'] = down_date_str[:8]
    full_text_data['batch'] = down_date_str
    full_text_data['is_deprecated'] = '0'
    full_text_data['filename'] = f"{lngid}.html"
    full_text_data['fulltext_type'] = "html"
    full_text_data['fulltext_addr'] = ''
    fulltext = ''.join(res.xpath('//div[@class="main_txt"]/div[@class="body"]').extract())
    full_text_data['fulltext_txt'] = fulltext
    full_text_data['fulltext_size'] = len(fulltext)
    full_text_data['page_cnt'] = "1"
    full_text_data['pub_year'] = pub_year
    # Only persist a full-text row when the page actually carried a body.
    if len(fulltext) != 0:
        save_data.append({'table': 'journal_fulltext_latest', 'data': full_text_data})

    # ---- journal_ref_latest: reference list ---------------------------------
    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    ref_div = res.xpath('//div[@class="ref"]')
    num = 0
    for div in ref_div:
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        # Each reference gets a derived id: article lngid + 4-digit ordinal.
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        ref_author_list = list()
        for span in div.xpath('div[2]/span[@class="author person_group"]/span[contains(@class,"name")]'):
            ref_author = ' '.join(span.xpath('.//text()').extract())
            ref_author_list.append(ref_author)
        ref_one["author"] = ';'.join(ref_author_list)
        ref_one["title"] = ';'.join(div.xpath('div[2]/span[@class="article_title"]/text()').extract())
        refer_text_site = ''.join(div.xpath('div[2]//text()').extract())
        ref_one["refer_text_site"] = refer_text_site
        ref_one["refer_text_raw"] = div.extract()
        # Reference type code, e.g. "[J]" for journal, "[M]" for monograph.
        strtype = re.findall(r'\[(.*?)\]', refer_text_site)
        ref_one["strtype"] = strtype[0] if strtype else ''
        ref_one["source_name"] = ''.join(div.xpath('div[2]/span[@class="source"]/text()').extract())
        ref_one["pub_year"] = ''.join(div.xpath('div[2]/span[@class="year"]/text()').extract())
        ref_one["vol"] = ''.join(div.xpath('div[2]/span[@class="volume"]/text()').extract())
        ref_one["num"] = ''.join(div.xpath('div[2]/span[@class="issue"]/text()').extract())
        begin_page = ''.join(div.xpath('div[2]/span[@class="fpage"]/text()').extract())
        end_page = ''.join(div.xpath('div[2]/span[@class="lpage"]/text()').extract())
        ref_one["begin_page"] = begin_page
        ref_one["end_page"] = end_page
        ref_one["page_info"] = f'{begin_page}-{end_page}'
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'journal_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def medjournals_medjournalsarticle_callback(callmodel: CallBackModel[JournalArticleModel]) -> DealModel:
    """No-op crawl stage for non-OA articles: all parsing is done in the ETL
    callback, so simply return an empty DealModel."""
    return DealModel()


def medjournals_medjournalsarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a medjournals (CMA) article detail page.

    Builds one ``journal_latest`` record from the cached article HTML
    (``para_dicts['data']['1_1']['html']``) combined with the JSON metadata
    stored on the task row (``sql_model.article_info_json``).

    :param callmodel: callback context; must expose ``para_dicts`` and
        ``sql_model`` (with ``article_info_json`` and ``rawid``).
    :return: ``EtlDealModel`` whose ``save_data`` holds the record to store.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for this source (CMA medical journals).
    sub_db_id = '00523'
    product = 'MEDJOURNALS'
    provider = 'CMA'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'cmamedjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data['country'] = 'CN'
    # Missing language defaults to Chinese; anything other than 中文 is
    # treated as English.
    language = article_json.get("ysjLanguage", "")
    data['language'] = 'ZH' if language in ('中文', '') else 'EN'
    data['title'] = ' '.join(res.xpath('//div[@class="main_title"]/text()').extract()).strip()
    data['title_alt'] = ' '.join(res.xpath('//div[@class="en_tit"]/text()').extract()).strip()
    data['provider_url'] = article_json['article_url']
    data['issn'] = article_json['ysjIssnNumber']
    data['cnno'] = article_json['ysjCnNumber']
    pub_year = article_json['issueYear']
    data['pub_year'] = pub_year
    # From JSON first; overridden below by the value scraped from the page
    # (which may be empty if the page lacks a "出版日期" line).
    data['pub_date'] = article_json['issuePubDate'].replace('-', '')
    data['pub_place'] = article_json['ysjAddress']
    data['vol'] = article_json['issueVol']
    data['num'] = article_json['issueIssue']
    data['journal_raw_id'] = article_json['url'].split('//')[-1].split('/')[0]
    data['journal_name'] = article_json['ysjCnAllName']
    data['journal_name_alt'] = article_json['ysjEnAllName']
    # Stamp the count with the capture date so snapshots stay distinguishable.
    data['down_cnt'] = f"{article_json['down_cnt']}@{ down_date_str[:8]}"
    data['doi'] = article_json['current_tip'].split('DOI:')[-1] if 'DOI:' in article_json['current_tip'] else ''
    if 'null' in data['doi']:
        data['doi'] = ''
    # current_tip ends with "...(issue):pages " — guard against a missing
    # match instead of raising IndexError on a malformed tip.
    page_match = re.findall(r'\(.*?\):(.*?) ', article_json['current_tip'].replace('\n', ''))
    page_info = page_match[0] if page_match else ''
    data['page_info'] = page_info
    if ',' in page_info:
        # Pages after the first comma are discontinuous "jump" pages.
        data['jump_page'] = page_info.split(',', 1)[-1].replace(',', ';')
        page_info = page_info.split(',', 1)[0]
    data['begin_page'] = page_info.split('-')[0]
    data['end_page'] = page_info.split('-')[-1]
    data['is_oa'] = '0'
    # --- Chinese author / affiliation block --------------------------------
    # Divs with class "aff" are affiliations; each is attached to the author
    # div directly before it and numbered on first occurrence.
    author_info_div = res.xpath('//div[contains(text(),"贡献者信息") or contains(text(),"Contributor information")]/following::div[1]/div')
    author_list = list()
    organ_dict = dict()
    for index in range(0, len(author_info_div)):
        if 'aff"' in author_info_div[index].extract():
            continue
        author_info = ''.join(author_info_div[index].xpath('span/text()').extract()).strip()
        if index + 1 >= len(author_info_div) or 'aff' not in author_info_div[index + 1].extract():
            organ = ''
        else:
            organ = ''.join(author_info_div[index + 1].xpath('p/text()').extract()).strip()
        if organ and organ not in organ_dict:
            organ_dict[organ] = len(organ_dict) + 1
        author = f"{author_info}[{organ_dict[organ] if organ else ''}]"
        author_list.append(author)
    organ_list = list()
    for k, v in organ_dict.items():
        organ_list.append(f"[{v}]{k}")
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_1st'] = author_list[0].replace('[1]', '').replace('[]', '') if author_list else ''
    organ_data = ';'.join(organ_list)
    data['organ'] = organ_data.replace("　", "")
    # --- English author / affiliation block (same pairing scheme) ----------
    author_alt_div = res.xpath('//span[contains(text(),"Contributor Information")]/ancestor::div[@class="en_abs"]/div')
    author_alt_list = list()
    author_raw_list = list()
    organ_alt_dict = dict()
    for index in range(1, len(author_alt_div)):
        if 'class="aff"' in author_alt_div[index].extract():
            continue
        author_alt_info = ''.join(author_alt_div[index].xpath('text()').extract()).strip()
        if index + 1 >= len(author_alt_div) or 'class="aff"' not in author_alt_div[index + 1].extract():
            organ_alt = ''
        else:
            organ_alt = ''.join(author_alt_div[index + 1].xpath('.//text()').extract()).strip()
        if organ_alt and organ_alt not in organ_alt_dict:
            organ_alt_dict[organ_alt] = len(organ_alt_dict) + 1
        author_alt = f"{author_alt_info}[{organ_alt_dict[organ_alt] if organ_alt else ''}]"
        author_raw = f"{author_alt_info}{organ_alt_dict[organ_alt] if organ_alt else ''}"
        author_alt_list.append(author_alt)
        author_raw_list.append(author_raw)
    organ_alt_list = list()
    for k, v in organ_alt_dict.items():
        organ_alt_list.append(f"{v}. {k}")
    data['author_alt'] = ';'.join(author_alt_list).replace('[]', '')
    data['author_raw'] = f"{','.join(author_raw_list)} ({';'.join(organ_alt_list)})".replace(" ()", '').replace('[]', '')
    # --- Corresponding authors + e-mails -----------------------------------
    # The Email div may appear either directly after the author div or one
    # div later (when an affiliation sits in between), so probe both.
    corr_author_div = res.xpath('//div[contains(text(),"通信作者") or contains(text(),"Corresponding author")]/following::div[1]/div')
    corr_author_list = list()
    email_list = list()
    for index in range(0, len(corr_author_div)):
        if 'aff"' in corr_author_div[index].extract():
            continue
        corr_author = ''.join(corr_author_div[index].xpath('span/text()').extract())
        corr_author_list.append(corr_author)
        if (index + 1) < len(corr_author_div) and 'Email' in corr_author_div[index + 1].extract():
            email = corr_author_div[index + 1].xpath('div/a/text()').extract_first()
            email_list.append(f"{email}:{corr_author}")
        if (index + 2) < len(corr_author_div) and 'Email' in corr_author_div[index + 2].extract():
            email = corr_author_div[index + 2].xpath('div/a/text()').extract_first()
            email_list.append(f"{email}:{corr_author}")
    data['corr_author'] = ';'.join(corr_author_list)
    data['email'] = ';'.join(email_list)
    keyword = ';'.join(res.xpath('//div[contains(text(),"关键词") or contains(text(),"Keywords")]/following::div[1]/a/text()').extract())
    data['keyword'] = keyword
    keyword_alt = ';'.join(res.xpath('//span[contains(text(),"Key words: ")]/following::p[1]/text()').extract()).strip()
    data['keyword_alt'] = keyword_alt
    abstract = ' '.join(res.xpath('//div[@class="article_abstract_mid"]/div//text()|//div[@class="article_abstract_mid"]/p//text()').extract()).strip()
    data['abstract'] = abstract
    abstract_alt = ' '.join(res.xpath('//div[@class="en_abs"][1]//text()').extract()).strip()
    data['abstract_alt'] = abstract_alt.replace("\t", "").replace("\n", "")
    # Dates are rendered as "Label：value"; split on the full-width colon.
    pub_date = ' '.join(res.xpath('//div[contains(text(),"Published Date") or contains(text(),"出版日期")]/text()').extract())
    data['pub_date'] = pub_date.split('：')[-1]
    recv_date = ' '.join(res.xpath('//div[contains(text(),"Received Date") or contains(text(),"收稿日期")]/text()').extract())
    data['recv_date'] = recv_date.split('：')[-1]
    accept_date = ' '.join(res.xpath('//div[contains(text(),"Accepted Date") or contains(text(),"接受日期")]/text()').extract())
    data['accept_date'] = accept_date.split('：')[-1]
    fund_list = res.xpath('//div[contains(text(),"Funds") or contains(text(),"基金项目")]/following::div[1]/div/p/text()').extract()
    fund = ';'.join([i.strip() for i in fund_list])
    # Drop only a trailing separator (the previous replace() stripped every
    # ';', destroying the join between multiple funds).
    data['fund'] = fund[:-1] if fund.endswith(';') else fund
    data['column_info'] = ''.join(res.xpath('//div[@class="article_attr"]/text()').extract()).strip()
    save_data.append({'table': 'journal_latest', 'data': data})

    result.save_data = save_data
    return result