import base64
import datetime
import json
import re
import time

from parsel import Selector
from urllib import parse
from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_g import SQLTable, CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, InputPlatformModel, journalInputMode, \
    RedisAllTaskModel, CallBackModel, JournalListModel, JournalIssueModel, DealInsertModel, JournalHomeModel, \
    DealUpdateModel, DealItemModel, OperatorSqlModel, JournalVolumeModel, EtlCallBackModel, JournalArticleModel, \
    EtlDealModel
from settings import get_settings

# Public API of this module: one callback per crawl stage (home -> list ->
# volume -> issue -> article) for each provider, plus the "*_etl_callback"
# functions that flatten a fetched article page into warehouse rows.
__all__ = [
    "naturejournal_naturehome_callback",
    "naturejournal_naturelist_callback",
    "naturejournal_naturevolume_callback",
    "naturejournal_natureissue_callback",
    "naturejournal_naturearticle_callback",
    "naturejournal_naturearticle_etl_callback",
    "rscjournal_rschome_callback",
    "rscjournal_rsclist_callback",
    "rscjournal_rscvolume_callback",
    "rscjournal_rscissue_callback",
    "rscjournal_rscarticle_callback",
    "rscjournal_rscarticle_etl_callback",

    "sciencedirectjournal_article_etl_callback",
    "cambridgejournal_article_etl_callback",
    "pnasjournal_article_etl_callback",
    "frontiersinjournal_article_etl_callback",
    "ieeejournal_article_etl_callback",
]


def clean_pubdate(value):
    """Normalize a date string to an 8-character ``YYYYMMDD`` string.

    All non-digit characters are stripped, the digits are truncated to 8
    and right-padded with ``'0'``.  An impossible month (> 12) zeroes out
    both month and day; an impossible day (> 31) zeroes out the day.

    Returns ``''`` for falsy input (``None`` or empty string).
    """
    if not value:
        return ''
    # r'\D' (raw string) — the non-raw '\D' is an invalid escape sequence
    # and raises a SyntaxWarning on Python 3.12+.
    digits = re.sub(r'\D', '', value)[:8].ljust(8, '0')
    if int(digits[4:6]) > 12:
        # Month out of range: the tail replacement clears the day too.
        digits = digits[:4] + '0000'
    if int(digits[6:]) > 31:
        digits = digits[:6] + '00'
    return digits


def naturejournal_naturehome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the Nature home payload: queue one journal row per child entry.

    Each entry yields an insert row (tagged with the next crawl stage) plus
    an update marking any existing row for that journal as active.
    """
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for child in payload["1_1"]['info']['children']:
        rawid = child['url_part'].replace('/', '')
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        row["journal_rawid"] = rawid
        row["sub_db_id"] = "00035"
        row["is_active"] = "1"
        row["subject"] = ""
        # journal_json carries the human-readable journal title downstream.
        row["journal_json"] = json.dumps({'journal_name': child['title']}, ensure_ascii=False)
        insert_model.lists.append(row)

        update_model = DealUpdateModel()
        update_model.update.update({"is_active": "1"})
        update_model.where.update({"journal_rawid": rawid,
                                   "task_tag": row["task_tag"],
                                   "task_name": callmodel.sql_model.task_name})
        deal.next_dicts.update_list.append(update_model)

    deal.next_dicts.insert.append(insert_model)
    return deal


def naturejournal_naturelist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Parse the Nature volume-list payload into volume-level task rows.

    For each year/volume entry in the parsed payload, queues an insert row
    (tagged with the next crawl stage) carrying the journal rawid, the
    publication year and the volume number.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    data = para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in data:
        di_model = DealInsertModel()
        di_model.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["1_1"]['info']['children']:
            journal_rawid = callmodel.sql_model.journal_rawid
            temp_info = info_dicts.copy()
            # Promote this row to the next crawl stage.
            temp_info["task_tag"] = temp_info["task_tag_next"]
            # r'\d+' — the non-raw '\d+' is an invalid escape sequence and
            # raises a SyntaxWarning on Python 3.12+.
            temp_info["pub_year"] = re.findall(r'\d+', item['year'])[0]
            # Volume number is the last path segment of the entry's URL part.
            temp_info["num"] = item['url_part'].split('/')[-1]
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00035"

            # No extra volume metadata is carried for Nature; downstream
            # still expects the volume_json key to exist.
            temp_info["volume_json"] = json.dumps(dict(), ensure_ascii=False)
            di_model.lists.append(temp_info)

        result.next_dicts.insert.append(di_model)

    return result


def naturejournal_naturevolume_callback(callmodel: CallBackModel[JournalVolumeModel]) -> DealModel:
    """Parse a Nature volume payload: queue one issue row per child entry."""
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    for child in payload["1_1"]['info']['children']:
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        row["pub_year"] = child['year'].split(' ')[-1]
        issue_num = child['num'].split(' ')[-1]
        volume_num = callmodel.sql_model.num
        # Composite key "volume_issue" identifies the issue within the journal.
        row["num"] = f"{volume_num}_{issue_num}"
        row["page"] = '0'
        row["journal_rawid"] = callmodel.sql_model.journal_rawid
        row["sub_db_id"] = "00035"
        row["issue_json"] = json.dumps(
            {'issue_num': issue_num, 'volume_num': volume_num},
            ensure_ascii=False)
        insert_model.lists.append(row)

    deal.next_dicts.insert.append(insert_model)
    return deal


def naturejournal_natureissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse a Nature issue payload: queue one article row per child entry."""
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    # Issue context (volume/issue numbers) stored by the previous stage.
    issue_json = json.loads(callmodel.sql_model.dict()['issue_json'])
    for child in payload["1_1"]['info']['children']:
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        # The article rawid is the last path segment of its URL part.
        row["rawid"] = child['url_part'].split('/')[-1]
        row["sub_db_id"] = "00035"
        article_info = {
            'journal_rawid': callmodel.sql_model.journal_rawid,
            # ';' is the downstream field separator — strip it from titles.
            'title': child['title'].replace(';', ' '),
            'issue_num': issue_json['issue_num'],
            'volume_num': issue_json['volume_num'],
        }
        row["article_info_json"] = json.dumps(article_info, ensure_ascii=False)
        insert_model.lists.append(row)

    deal.next_dicts.insert.append(insert_model)
    return deal


def naturejournal_naturearticle_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """No-op stage: nothing is queued here; returns an empty DealModel."""
    return DealModel()


def naturejournal_naturearticle_etl_callback(callmodel) -> EtlDealModel:
    """Flatten a fetched Nature article page into warehouse rows.

    Reads the raw HTML from ``callmodel.para_dicts['data']['1_1']['html']``
    and the issue context from ``callmodel.sql_model.article_info_json``,
    then appends to ``result.save_data``:

    * one ``oversea_meta_latest`` row with the bibliographic metadata, and
    * one ``oversea_ref_latest`` row with the parsed reference list.

    Fixes over the previous version: removed the stray debug ``print``,
    the duplicate ``down_cnt``/``sub_db_id`` assignments, the unused
    ``ref_author_list`` local, and the dead commented-out full-text block.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    # Context captured when the issue page was parsed (journal/volume/issue).
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00035'
    product = 'NATURE'
    provider = 'NATURE'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'naturejournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "GB"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//h1[@class="c-article-title"]//text()').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = f'https://www.nature.com/articles/{rawid}'
    data['down_cnt'] = ''
    data['issn'] = ''.join(res.xpath('//span[@itemprop="printIssn"]/text()').extract()).strip()
    data['eissn'] = ''.join(res.xpath('//span[@itemprop="onlineIssn"]/text()').extract()).strip()
    data['cnno'] = ''
    # Publication date: try the visible byline first, then fall back through
    # the <meta> tag variants Nature has used over time.
    pub_date = ''.join(res.xpath('//a[@data-track-action="publication date"]/time/@datetime').extract()).strip()
    if not pub_date:
        pub_date = ''.join(res.xpath('//meta[@name="citation_publication_date"]/@content').extract()).strip()
    if not pub_date:
        pub_date = ''.join(res.xpath('//meta[@name="dc.date"]/@content').extract()).strip()
    if not pub_date:
        pub_date = ''.join(res.xpath('//meta[@name="prism.publicationDate"]/@content').extract()).strip()
    data['pub_date'] = clean_pubdate(pub_date)
    # NOTE(review): year is sliced from the *raw* date string — assumes the
    # date is year-first (e.g. ISO "2023-05-17") for every fallback source.
    pub_year = pub_date[:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['vol'] = article_json['volume_num']
    data['num'] = article_json['issue_num']
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = ''.join(res.xpath('//i[@data-test="journal-title"]/text()').extract()).strip()
    data['journal_name_alt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="DOI"]/@content').extract()).strip()
    is_oa = ''.join(res.xpath('//header//*[text()="Open Access"]/text()').extract()).strip()
    data['is_oa'] = "1" if is_oa == 'Open Access' else "0"
    # Schema.org JSON-LD blob embedded in the page; carries pagination,
    # authors/affiliations, keywords and the abstract.
    mainEntity = json.loads(''.join(res.xpath('//script[@type="application/ld+json"]/text()').extract()).strip())[
        'mainEntity']
    data['begin_page'] = mainEntity['pageStart']
    data['end_page'] = mainEntity['pageEnd']
    author_list = list()
    organ_dict = dict()  # affiliation name -> 1-based index used in author tags
    orc_id_list = list()
    for item in mainEntity['author']:
        if item.get('affiliation'):
            # Tag the author with "[i]" per affiliation, numbering
            # affiliations in first-seen order.
            author_part = ''
            for aff in item.get('affiliation'):
                if aff['address']['name'] not in organ_dict:
                    organ_dict[aff['address']['name']] = len(organ_dict) + 1
                author_part = author_part + f"[{organ_dict[aff['address']['name']]}]"
            author = f"{item['name']}{author_part}"
        else:
            author = item['name']
        author_list.append(author)
        orc_id_url = item.get('url', '')
        if orc_id_url:
            # ORCID id is the last URL path segment; pair it with the author.
            orc_id = f'{orc_id_url.split("/")[-1]}@{author}'
            orc_id_list.append(orc_id)
    organ_list = list()
    for k, v in organ_dict.items():
        organ_list.append(f"[{v}]{k}")
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['orc_id'] = ';'.join(orc_id_list)
    data['author_1st'] = author_list[0].replace('[1]', '').replace('[]', '') if author_list else ''
    data['organ'] = ';'.join(organ_list)
    email_list = list()
    corr_author_list = list()
    # Corresponding authors are the entries that carry an email in JSON-LD.
    for item in mainEntity['author']:
        if not item.get('email', ''):
            continue
        corr_author_list.append(item['name'])
        email_list.append(f"{item['email']}:{item['name']}")
    data['corr_author'] = ';'.join(corr_author_list)
    data['email'] = ';'.join(email_list)
    data['keyword'] = mainEntity['keywords'].replace(',', ';')
    data['keyword_alt'] = ''
    data['subject_word'] = ';'.join(res.xpath('//ul[@class="c-article-subject-list"]/li/span/text()').extract())
    data['abstract'] = mainEntity['description']
    data['abstract_alt'] = ''
    # Editorial history dates, normalized to YYYYMMDD by stripping dashes.
    recv_date = ''.join(res.xpath('//p[text()="Received"]/span[2]/time/@datetime').extract())
    data['recv_date'] = recv_date.replace('-', '')
    revision_date = ''.join(res.xpath('//p[text()="Revised"]/span[2]/time/@datetime').extract())
    data['revision_date'] = revision_date.replace('-', '')
    accept_date = ''.join(res.xpath('//p[text()="Accepted"]/span[2]/time/@datetime').extract())
    data['accept_date'] = accept_date.replace('-', '')
    data['fund'] = ''
    fulltext_type = ''.join(res.xpath('//aside//span[@class="c-pdf-download__text"]/text()').extract())
    data['fulltext_type'] = 'pdf' if fulltext_type else ''
    data['column_info'] = ''.join(res.xpath('//a[@class="u-link-inherit"]/text()').extract()).strip()
    data['cite_info'] = ''.join(
        res.xpath('//p[@class="c-bibliographic-information__citation"]//text()').extract()).strip()
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    # ---- reference list row ------------------------------------------------
    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    # One citation_reference <meta> per reference, paired positionally with
    # the rendered reference paragraphs (zip stops at the shorter list).
    ref_metas = res.xpath('//meta[@name="citation_reference"]/@content').extract()
    ref_ps = res.xpath('//p[@class="c-article-references__text"]/text()').extract()
    num = 0
    for ref_meta, ref_p in zip(ref_metas, ref_ps):
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        # Reference id = article lngid + zero-padded 1-based position.
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        ref_one["refer_text_raw"] = ref_p
        ref_one["refer_text_site"] = ref_p
        ref_info = dict()
        # Journal references encode "citation_*=value" pairs separated by
        # runs of 4+ spaces; normalize those to "; " then split on the keys.
        if 'citation_journal_title' in ref_meta:
            for info in re.split('; (?=citation_)', re.sub(' {4,}', '; ', ref_meta)):
                k, v = info.split('=', 1)
                if k.strip() == 'citation_author':
                    # Flip "Surname Given" -> "Given Surname" per author.
                    vs = v.split(',')
                    v_list = list()
                    for i in vs:
                        v_list.append(f'{" ".join(i.split(" ")[1:])} {i.split(" ")[0]}')
                    ref_info[k.strip()] = ','.join(v_list)
                else:
                    ref_info[k.strip()] = v.strip()
        ref_one["author"] = ref_info.get("citation_author", '')
        ref_one["title"] = ref_info.get("citation_title", '')
        ref_one["strtype"] = ''
        ref_one["source_name"] = ref_info.get("citation_journal_title", '')
        ref_one["pub_year"] = ref_info.get("citation_publication_date", '')
        ref_one["vol"] = ref_info.get("citation_volume", '')
        ref_one["num"] = ''
        ref_one["doi"] = ref_info.get("citation_doi", '')
        page_info = ref_info.get("citation_pages", '')
        ref_one["begin_page"] = page_info.split('-')[0]
        ref_one["end_page"] = page_info.split('-')[-1]
        ref_one["page_info"] = page_info
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def rscjournal_rschome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the RSC journals listing page: queue one row per journal.

    Each list entry yields an insert row (tagged with the next crawl stage)
    plus an update marking any existing row for that journal as active.
    """
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    selector = Selector(text=callmodel.para_dicts['data']['1_1']['html'])
    for li in selector.xpath('//ul[@class="list__collection"]/li'):
        href = li.xpath('a/@href').extract_first().strip()
        journal_name = ''.join(li.xpath('a/span/span/text() ').extract()).strip()
        # Journal rawid is the last path segment with any query string dropped.
        rawid = href.split('?')[0].split('/')[-1]
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        row["journal_rawid"] = rawid
        row["sub_db_id"] = "00429"
        row["is_active"] = "1"
        row["subject"] = ""
        row["journal_json"] = json.dumps({'journal_name': journal_name}, ensure_ascii=False)
        insert_model.lists.append(row)

        update_model = DealUpdateModel()
        update_model.update.update({"is_active": "1"})
        update_model.where.update({"journal_rawid": rawid,
                                   "task_tag": row["task_tag"],
                                   "task_name": callmodel.sql_model.task_name})
        deal.next_dicts.update_list.append(update_model)

    deal.next_dicts.insert.append(insert_model)
    return deal


def rscjournal_rsclist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Parse the RSC "all issues" page into volume-level task rows.

    Each ``<option>`` in the issue navigation select yields one insert row
    (tagged with the next crawl stage) carrying the year, volume number and
    the page-level identifiers needed by the next stage.

    Fix: the ``latestissueid``/``jname`` regexes scan the whole page HTML
    and are loop-invariant — they are now computed once instead of on every
    iteration (guarded so an empty option list still does no regex work,
    matching the previous behavior).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    data = para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in data:
        di_model = DealInsertModel()
        di_model.insert_pre = CoreSqlValue.insert_ig_it
        html = callmodel.para_dicts['data']['1_1']['html']
        res = Selector(text=html)
        li_list = res.xpath('//div[@id="issue-nav"]/select/option')
        if li_list:
            # Hoisted loop-invariant page-level identifiers.
            latestissueid = re.findall("latestissueid: '(.*?)'", html)[0]
            jname = re.findall("jname: '(.*?)'", html)[0]
        for li in li_list:
            value = li.xpath('@value').extract_first().strip()
            journal_rawid = callmodel.sql_model.journal_rawid
            temp_info = info_dicts.copy()
            # Promote this row to the next crawl stage.
            temp_info["task_tag"] = temp_info["task_tag_next"]
            # Option values look like "<year>#<volume>"; options containing
            # 'yrname' reuse the year as the volume number.
            temp_info["pub_year"] = value.split('#')[0]
            if 'yrname' in value:
                temp_info["num"] = value.split('#')[0]
            else:
                temp_info["num"] = value.split('#')[1]
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00429"

            new_dict = dict()
            new_dict['value'] = value
            new_dict['latestissueid'] = latestissueid
            new_dict['name'] = journal_rawid.upper()
            new_dict['jname'] = jname
            temp_info["volume_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model.lists.append(temp_info)

        result.next_dicts.insert.append(di_model)

    return result


def rscjournal_rscvolume_callback(callmodel: CallBackModel[JournalVolumeModel]) -> DealModel:
    """Parse an RSC volume page: queue one issue row per issue capsule."""
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    # Volume context stored by the previous stage.
    volume_json = json.loads(callmodel.sql_model.dict()['volume_json'])
    selector = Selector(text=callmodel.para_dicts['data']['1_1']['html'])
    for li in selector.xpath('//ul[@class="list__collection"]/li'):
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        row["pub_year"] = callmodel.sql_model.pub_year
        href = li.xpath('a/@href').extract_first().strip().replace('&amp;', '&')
        issn = li.xpath('a/@data-issnprint').extract_first().strip()
        eissn = li.xpath('a/@data-issnonline').extract_first().strip()
        data_issueid = li.xpath('a/@data-issueid').extract_first().strip()
        issue_num_info = li.xpath('a/span/text()').extract_first().strip()
        issue_num = ''.join(re.findall('Issue (\d+)', issue_num_info))
        row["num"] = data_issueid
        row["page"] = '0'
        row["journal_rawid"] = callmodel.sql_model.journal_rawid
        row["sub_db_id"] = "00429"
        issue_info = {
            'issue_num': issue_num,
            'volume_num': callmodel.sql_model.num,
            'issue_url': 'https://pubs.rsc.org' + href,
            'issue_num_info': issue_num_info,
            'issn': issn,
            'eissn': eissn,
            'data_issueid': data_issueid,
            'value': volume_json['value'],
            # Double quotes would clash with downstream quoting — swap them.
            'jname': volume_json['jname'].replace('"', "'"),
            'name': volume_json['name'],
            'latestissueid': volume_json['latestissueid'],
        }
        row["issue_json"] = json.dumps(issue_info, ensure_ascii=False)
        insert_model.lists.append(row)

    deal.next_dicts.insert.append(insert_model)
    return deal


def rscjournal_rscissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse an RSC issue page: queue one article row per article capsule."""
    deal = DealModel()
    payload = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    base_info = {"task_name": callmodel.sql_model.task_name,
                 "task_tag": callmodel.sql_model.task_tag,
                 "task_tag_next": task_info.task_tag_next}
    if "1_1" not in payload:
        return deal

    insert_model = DealInsertModel()
    insert_model.insert_pre = CoreSqlValue.insert_ig_it
    # Issue context stored by the previous stage.
    issue_json = json.loads(callmodel.sql_model.dict()['issue_json'])
    selector = Selector(text=callmodel.para_dicts['data']['1_1']['html'])
    for capsule in selector.xpath('//div[contains(@class,"capsule--article")]'):
        row = base_info.copy()
        # Promote this row to the next crawl stage.
        row["task_tag"] = row.pop("task_tag_next")
        href = capsule.xpath('a/@href').extract_first().strip()
        row["rawid"] = capsule.xpath('a/@name').extract_first().lower()
        row["sub_db_id"] = "00429"
        article_info = {
            'journal_rawid': callmodel.sql_model.journal_rawid,
            'title': capsule.xpath('a/h3/text()').extract_first().strip(),
            'issue_num': issue_json['issue_num'],
            'volume_num': issue_json['volume_num'],
            'issue_url': issue_json['issue_url'],
            'article_url': 'https://pubs.rsc.org' + href,
            'issue_num_info': issue_json['issue_num_info'],
            'issn': issue_json['issn'],
            'eissn': issue_json['eissn'],
            'jname': issue_json['jname'],
            'name': issue_json['name'],
            'data_issueid': issue_json['data_issueid'],
            'value': issue_json['value'],
        }
        row["article_info_json"] = json.dumps(article_info, ensure_ascii=False)
        insert_model.lists.append(row)

    deal.next_dicts.insert.append(insert_model)
    return deal


def rscjournal_rscarticle_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """No-op stage: nothing is queued here; returns an empty DealModel."""
    return DealModel()


def rscjournal_rscarticle_etl_callback(callmodel) -> EtlDealModel:
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00429'
    product = 'RSC'
    provider = 'ROYALSOCIETY'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    print(lngid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'rscjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "GB"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//div[@class="article__title"]/h2//text()').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = article_json['article_url']
    data['down_cnt'] = ''
    data['issn'] = article_json['issn']
    data['eissn'] = article_json['eissn']
    data['cnno'] = ''
    pub_date = ''.join(res.xpath('//dt[text()="First published"]/following::dd[1]/text()').extract()).strip()
    data['pub_date'] = datetime.datetime.strptime(pub_date, "%d %b %Y").strftime("%Y%m%d") if pub_date else ''
    if not pub_date:
        pub_date = ''.join(res.xpath('//meta[@name="citation_publication_date"]/@content').extract()).strip()
        data['pub_date'] = clean_pubdate(pub_date)
    pub_year = data['pub_date'][:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['vol'] = article_json['volume_num']
    if article_json['volume_num'] == pub_year:
        data['vol'] = ''
    data['num'] = article_json['issue_num']
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = article_json['jname']
    data['journal_name_alt'] = ''
    data['down_cnt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="citation_doi"]/@content').extract()).strip()
    data['is_oa'] = '0'
    issue_num_info = article_json['issue_num_info']
    data['begin_page'] = re.findall('Page (.*?) to', issue_num_info)[0]
    data['end_page'] = re.findall('to (.*)', issue_num_info)[0]
    author_list = list()
    organ_list = list()
    organ_dict = dict()
    orc_id_list = list()
    email_list = list()
    corr_author_list = list()
    for index, p in enumerate(res.xpath('//div[@id="pnlAuthorAffiliations"]/div/p')):
        k = ''.join(p.xpath('span[1]/sup/text()').extract()).strip()
        if '*' in k:
            continue
        organ = ''.join(p.xpath('span[2]/text()[1]').extract()).strip()
        email = ''.join(p.xpath('span[2]/a[1]/text()').extract()).strip()
        organ_dict[k] = {'index': index + 1, 'organ': organ, 'email': email}
    for span in res.xpath('//div[@class="article__authors"]/span'):
        name = ''.join(span.xpath('a[1]/text()').extract()).replace('\r\n', ' ').strip()
        for k in ''.join(span.xpath('span/sup/i/text()').extract()).strip():
            author_list.append(name + f'[{organ_dict[k]["index"]}]')
        if not ''.join(span.xpath('span/sup/i/text()').extract()).strip():
            author_list.append(name)
        corr_info = ''.join(span.xpath('text()').extract()).strip()
        if "*" in corr_info:
            corr_author_list.append(name)
            for k in ''.join(span.xpath('span/sup/i/text()').extract()).strip():
                if not organ_dict[k]["email"]:
                    continue
                email_list.append(f"{organ_dict[k]['email']}:{name}")
                break
        orc_id_info = ''.join(span.xpath('span[@class="orcid ver-b"]/a/@href').extract())
        if orc_id_info:
            orc_id_list.append(f"{orc_id_info.split('/')[-1]}@{name}")
    for k, v in organ_dict.items():
        organ_list.append(f"[{v['index']}]{v['organ']}")
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_1st'] = re.sub('\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join(organ_list)
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ';'.join(orc_id_list)
    data['email'] = ';'.join(email_list)
    data['keyword'] = ''.join(res.xpath('//meta[@name="keywords"]/@content').extract()).strip()
    data['keyword_alt'] = ''
    data['abstract'] = ''.join(res.xpath('//div[@class="capsule__column-wrapper"]//text()').extract()).strip().replace(
        '\n', ' ').replace('\r', ' ')
    data['abstract_alt'] = ''
    recv_date = ''.join(res.xpath('//dt[text()="Submitted"]/following::dd[1]/text()').extract()).strip()
    data['recv_date'] = datetime.datetime.strptime(recv_date, "%d %b %Y").strftime("%Y%m%d") if recv_date else ''
    accept_date = ''.join(res.xpath('//dt[text()="Accepted"]/following::dd[1]/text()').extract()).strip()
    data['accept_date'] = datetime.datetime.strptime(accept_date, "%d %b %Y").strftime("%Y%m%d") if accept_date else ''
    data['raw_type'] = ''.join(res.xpath('//strong[text()="Article type"]/following::dd[1]/text()').extract()).strip()
    data['fund'] = ''
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    # full_text_data = dict()
    # full_text_data['lngid'] = lngid
    # full_text_data['keyid'] = lngid
    # full_text_data['sub_db_id'] = sub_db_id
    # full_text_data['source_type'] = '3'
    # full_text_data['latest_date'] = down_date_str[:8]
    # full_text_data['batch'] = down_date_str
    # full_text_data['is_deprecated'] = '0'
    # full_text_data['filename'] = f"{lngid}.html"
    # full_text_data['fulltext_type'] = "html"
    # full_text_data['fulltext_addr'] = ''
    # full_text_data['fulltext_size'] = ''
    # fulltext = ''.join(res.xpath('//article[@class="article-control"]').extract())
    # full_text_data['fulltext_txt'] = fulltext
    # full_text_data['page_cnt'] = "1"
    # full_text_data['pub_year'] = pub_year
    # save_data.append({'table': 'journal_fulltext_latest', 'data': full_text_data})

    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    ref_metas = res.xpath('//meta[@name="citation_reference"]/@content').extract()
    num = 0
    for ref_meta in ref_metas:
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        ref_one["sub_db_id"] = sub_db_id
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        ref_author_list = list()

        ref_info = dict()
        ref_meta = re.sub(";.*?citation_", ";__citation_", ref_meta, flags=re.S)
        if ref_meta.endswith(';'):
            ref_meta = ref_meta[:-1]
        for info in ref_meta.split(';__'):
            if 'src="' in info:
                info = re.sub('<.*?src=".*?>', '', info)
            k, v = info.split('=')
            if k.strip() == 'citation_author':
                ref_author_list.append(v.strip())
            else:
                ref_info[k.strip()] = v.replace(';', ' ').strip()
        temp = ';'.join(ref_author_list)
        if temp != "":
            temp = temp + "."
        title_one = ref_info.get("citation_journal_title", '')
        if title_one != "":
            temp = temp + title_one + "."
        if ref_info.get("citation_journal_title", ''):
            temp = temp + ref_info.get("citation_journal_title", '') + ","
        if ref_info.get("citation_publication_date", ''):
            temp = temp + ref_info.get("citation_publication_date", '') + ","
        if ref_info.get("citation_volume", ''):
            temp = temp + ref_info.get("citation_volume", '') + "."
        if ref_info.get("citation_pages", ''):
            temp = temp + ref_info.get("citation_pages", '') + "."

        ref_info['citation_author'] = ';'.join(ref_author_list)
        ref_one["refer_text_raw"] = ref_meta
        ref_one["refer_text_site"] = temp
        ref_one["author"] = ';'.join(ref_author_list)
        ref_one["title"] = ref_info.get("citation_journal_title", '')
        ref_one["strtype"] = ''
        ref_one["source_name"] = ref_info.get("citation_journal_title", '')
        ref_one["pub_year"] = ref_info.get("citation_publication_date", '')
        ref_one["vol"] = ref_info.get("citation_volume", '')
        ref_one["doi"] = ''
        ref_one["num"] = ref_info.get("citation_issue", '')
        page_info = ref_info.get("citation_pages", '')
        ref_one["begin_page"] = page_info.split('-')[0]
        ref_one["end_page"] = page_info.split('-')[-1]
        ref_one["page_info"] = page_info
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def sciencedirectjournal_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a ScienceDirect journal-article detail page.

    Parses the downloaded article HTML (``para_dicts['data']['1_1']['html']``)
    plus the crawler-supplied ``article_info_json`` into two save rows:

    * ``oversea_meta_latest`` -- article metadata (title, authors, organs,
      dates, DOI, ...).
    * ``oversea_ref_latest``  -- the article's reference list, one sub-record
      per reference with a derived per-reference lngid.

    :param callmodel: callback model; ``sql_model`` carries ``rawid`` and
        ``article_info_json``.
    :return: ``EtlDealModel`` with ``save_data`` populated.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00018'
    product = 'SCIENCEDIRECT'
    provider = 'ELSEVIER'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'sciencedirectjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "NL"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//span[@class="title-text"]//text()').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = f'https://www.sciencedirect.com/science/article/abs/pii/{rawid}'
    data['down_cnt'] = ''
    data['issn'] = ''.join(res.xpath('//meta[@name="citation_issn"]/@content').extract()).strip()
    data['eissn'] = ''
    data['cnno'] = ''
    pub_date = ''.join(res.xpath('//meta[@name="citation_publication_date"]/@content').extract()).strip()
    data['pub_date'] = clean_pubdate(pub_date)
    # Year is taken from the cleaned (digits-only, zero-padded) date.
    pub_year = data['pub_date'][:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['vol'] = ''.join(res.xpath('//meta[@name="citation_volume"]/@content').extract()).strip()
    data['num'] = ''.join(res.xpath('//meta[@name="citation_issue"]/@content').extract()).strip()
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = ''.join(res.xpath('//h2[@id="publication-title"]//text()').extract()).strip()
    data['journal_name_alt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="citation_doi"]/@content').extract()).strip()
    data['is_oa'] = str(article_json.get('openAccess', {}).get('oaArticleStatus', {}).get('isOpenAccess', '0'))
    data['begin_page'] = ''.join(res.xpath('//meta[@name="citation_firstpage"]/@content').extract()).strip()
    data['end_page'] = ''.join(res.xpath('//meta[@name="citation_lastpage"]/@content').extract()).strip()
    # ScienceDirect embeds a JSON blob with structured author/affiliation data.
    # An IndexError here is deliberate fail-fast: it signals a page-template change.
    data_info = re.findall('data-iso-key="_0">(.*?)</script>', html)[0]
    data_info_json = json.loads(data_info)
    author_info_list = list()
    organ_info_list = list()
    if data_info_json['authors']['content']:
        for item in data_info_json['authors']['content'][0]['$$']:
            if item['#name'] == 'author':
                author_dict = {}
                # given-name / surname / degrees, joined in that order below.
                one_author_list = [''] * 3
                organ_code = list()
                email_ = ''
                for i in item['$$']:
                    if i['#name'] == 'given-name':
                        one_author_list[0] = i['_']
                    if i['#name'] == 'surname':
                        one_author_list[1] = i['_']
                    if i['#name'] == 'degrees':
                        one_author_list[2] = i['_']
                    if i['#name'] == 'cross-ref':
                        # COR1 is the "corresponding author" marker, not an affiliation.
                        if i['$']['refid'] == 'COR1':
                            continue
                        organ_code.append(i['$$'][0]['_'])
                    if i['#name'] == 'encoded-e-address':
                        # E-mail is base64 + url-encoded JSON on the page.
                        email_info = base64.b64decode(i['__encoded']).decode('utf-8')
                        email_ = json.loads(parse.unquote(email_info))['_']
                author_dict['name'] = ' '.join(one_author_list).strip()
                author_dict['organ_code'] = organ_code
                author_dict['email'] = email_
                author_info_list.append(author_dict)
            if item['#name'] == 'affiliation':
                organ_dict = dict()
                organ_dict['organ_code'] = ''
                organ_dict['organ_name'] = ''
                for i in item['$$']:
                    if i['#name'] == 'label':
                        organ_dict['organ_code'] = i['_']
                    if i['#name'] == 'textfn':
                        organ_dict['organ_name'] = i['_']
                organ_info_list.append(organ_dict)
    author_list = list()
    email_list = list()
    corr_author_list = list()
    for author_dict in author_info_list:
        author_ogr = ''
        if author_dict['organ_code']:
            for o_c in author_dict['organ_code']:
                for index, organ_dict in enumerate(organ_info_list):
                    if organ_dict['organ_code'] == o_c:
                        author_ogr += f'[{index + 1}]'
        else:
            # Authors without explicit cross-refs map to the unlabeled affiliation(s).
            for index, organ_dict in enumerate(organ_info_list):
                if organ_dict['organ_code'] == '':
                    author_ogr += f'[{index + 1}]'
        # BUGFIX: append exactly once per author. The previous code appended
        # inside the per-organ-code loop, so a multi-affiliation author was
        # emitted once per affiliation with cumulative [i] tags.
        author_list.append(f'{author_dict["name"]}{author_ogr}')
        if author_dict['email']:
            email_list.append(f'{author_dict["email"]}:{author_dict["name"]}')
            corr_author_list.append(author_dict["name"])
    organ_list = list()
    for index, organ_dict in enumerate(organ_info_list):
        organ_list.append(f'[{index + 1}]{organ_dict["organ_name"]}')
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join(organ_list)
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ''
    data['email'] = ';'.join(email_list)
    data['keyword'] = ''
    data['keyword_alt'] = ''
    data['abstract'] = ' '.join(
        res.xpath('//h2[text()="Abstract"]/following-sibling::div[1]//text()').extract()
    ).strip().replace('\n', ' ').replace('\r', ' ')
    data['abstract_alt'] = ''
    recv_date = data_info_json['article']['dates'].get('Received', '')
    data['recv_date'] = datetime.datetime.strptime(recv_date, "%d %B %Y").strftime("%Y%m%d") if recv_date else ''
    # 'Revised' may carry several dates; only the first is kept.
    revision_date = data_info_json['article']['dates'].get('Revised', [])
    data['revision_date'] = datetime.datetime.strptime(revision_date[0], "%d %B %Y").strftime(
        "%Y%m%d") if revision_date else ''
    accept_date = data_info_json['article']['dates'].get('Accepted', '')
    data['accept_date'] = datetime.datetime.strptime(accept_date, "%d %B %Y").strftime("%Y%m%d") if accept_date else ''
    online_date = data_info_json['article']['dates'].get('Available online', '')
    data['online_date'] = datetime.datetime.strptime(online_date, "%d %B %Y").strftime("%Y%m%d") if online_date else ''
    data['raw_type'] = ''
    data['fund'] = ''
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    li_list = res.xpath('//div[@id="preview-section-references"]//ul/li')
    num = 0
    for li in li_list:
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        # Per-reference lngid = article lngid + zero-padded sequence number.
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid

        author = ''.join(li.xpath('span[1]//text()').extract()).strip()
        title = ''.join(li.xpath('h3//text()').extract()).strip()
        year_info = ''.join(li.xpath('span[2]//text()').extract()).strip()
        ref_one["refer_text_raw"] = f'{author} {title} {year_info}'
        ref_one["refer_text_site"] = f'{author} {title} {year_info}'
        ref_one["author"] = author
        ref_one["title"] = title
        ref_one["strtype"] = ''
        # year_info looks like "Journal Name (2020), ..."; strip the (YYYY) part
        # to get the source name, keep the last (YYYY) as the publication year.
        ref_one["source_name"] = re.sub(r'\(\d{4}\)', '', year_info)
        year = re.findall(r"\((\d{4})\)", year_info)
        ref_one["pub_year"] = year[-1] if year else ''
        ref_one["vol"] = ''
        ref_one["doi"] = ''
        ref_one["num"] = ''
        ref_one["begin_page"] = ''
        ref_one["end_page"] = ''
        ref_one["page_info"] = ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def cambridgejournal_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a Cambridge Core journal-article detail page.

    Parses the downloaded article HTML (``para_dicts['data']['1_1']['html']``)
    plus the crawler-supplied ``article_info_json`` into two save rows:

    * ``oversea_meta_latest`` -- article metadata.
    * ``oversea_ref_latest``  -- the article's reference list.

    :param callmodel: callback model; ``sql_model`` carries ``rawid`` and
        ``article_info_json`` (must contain ``art_href`` / ``journal_rawid``).
    :return: ``EtlDealModel`` with ``save_data`` populated.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    sub_db_id = '00146'
    product = 'CAMBRIDGE'
    provider = 'CAMBRIDGE'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'cambridgejournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "GB"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//meta[@name="citation_title"]/@content').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = f'https://www.cambridge.org{article_json["art_href"]}'
    data['down_cnt'] = ''
    # Cambridge pages carry up to two citation_issn metas: print first, then electronic.
    issn_list = res.xpath('//meta[@name="citation_issn"]/@content').extract()
    data['issn'] = ''
    data['eissn'] = ''
    if len(issn_list) == 2:
        data['issn'] = issn_list[0]
        # BUGFIX: the second entry is the e-ISSN (old code stored issn_list[0]
        # in both fields).
        data['eissn'] = issn_list[1]
    if len(issn_list) == 1:
        data['issn'] = issn_list[0]
    data['cnno'] = ''
    pub_date = ''.join(res.xpath('//meta[@name="citation_online_date"]/@content').extract()).strip()
    data['pub_date'] = clean_pubdate(pub_date)
    # Derive the year from the cleaned date, consistent with the other *_etl callbacks.
    pub_year = data['pub_date'][:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['vol'] = ''.join(res.xpath('//meta[@name="citation_volume"]/@content').extract()).strip()
    data['num'] = ''.join(res.xpath('//meta[@name="citation_issue"]/@content').extract()).strip()
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = ''.join(res.xpath('//meta[@name="citation_journal_title"]/@content').extract()).strip()
    data['journal_name_alt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="citation_doi"]/@content').extract()).strip()
    oa_info = ''.join(res.xpath('//span[@class="open-access"]//text()').extract()).strip()
    data['is_oa'] = '1' if oa_info == 'Open access' else '0'
    data['begin_page'] = ''.join(res.xpath('//meta[@name="citation_firstpage"]/@content').extract()).strip()
    data['end_page'] = ''.join(res.xpath('//meta[@name="citation_lastpage"]/@content').extract()).strip()
    author_list = list()
    email_list = list()
    corr_author_list = list()
    organ_list = list()
    orc_id_list = list()
    author_intro_list = list()
    for div in res.xpath('//dl[@id="authors-details"]/div[@class="row author"]'):
        author = ''.join(div.xpath('dt/text()').extract()).strip()
        # A trailing '*' marks the corresponding author.
        if author.endswith('*'):
            author = author[:-1]
            if author not in corr_author_list:
                corr_author_list.append(author)
        organ = ''.join(div.xpath('dd/div//text()').extract()).strip()
        # De-duplicate affiliations; the author keeps the 1-based organ index.
        if organ not in organ_list:
            organ_list.append(organ)
        index = organ_list.index(organ)
        author_list.append(f'{author}[{index + 1}]')
    emails = list()
    for div in res.xpath('//dl[@id="authors-details"]/div[@class="row"]//div[@class="d-inline"]'):
        text_info = ''.join(div.xpath('.//text()').extract()).strip()
        email = ''.join(div.xpath('.//div[@class="corresp"]/a[1]/text()').extract()).strip()
        if not email and 'Email:' in text_info:
            email = text_info.split('Email:')[-1].strip()
        emails.append(email)
        author_intro_list.append(text_info)
    for div in res.xpath('//img[@class="app-icon icon orcid"]/ancestor::div[1]'):
        author = ''.join(div.xpath('a[1]//text()').extract()).strip()
        orc_href = ''.join(div.xpath('a[2]/@href').extract()).strip()
        orc_id_list.append(f'{orc_href.split("/")[-1]}@{author}')
    # NOTE(review): emails are paired with corresponding authors positionally —
    # assumes the page lists them in the same order; confirm on real pages.
    for corr_author, email in zip(corr_author_list, emails):
        if email:
            email_list.append(f'{email}:{corr_author}')
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_intro'] = ';'.join(author_intro_list)
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join([f'[{i + 1}]{v}' for i, v in enumerate(organ_list)])
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ';'.join(orc_id_list)
    data['email'] = ';'.join(email_list)
    data['keyword'] = ''.join(res.xpath('//meta[@name="citation_keywords"]/@content').extract()).strip()
    data['keyword_alt'] = ''
    data['abstract'] = ' '.join(
        res.xpath('//div[@class="abstract-content"]//text()').extract()
    ).strip().replace('\n', ' ').replace('\r', ' ')
    data['abstract_alt'] = ''
    recv_date = ''.join(res.xpath('//dt[text()="Submitted"]/following::dd[1]/text()').extract()).strip()
    data['recv_date'] = datetime.datetime.strptime(recv_date, "%d %b %Y").strftime("%Y%m%d") if recv_date else ''
    data['accept_date'] = ''
    data['raw_type'] = ''.join(res.xpath(
        '//dl[@class="article-details"]//dt[contains(text(),"Type")]/following-sibling::dd[1]//text()').extract()).strip()
    data['fund'] = ''
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    num = 0
    for div in res.xpath('//div[@id="references-list"]/div'):
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        # Per-reference lngid = article lngid + zero-padded sequence number.
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        refer_text = ''.join(div.xpath('.//div[contains(@id,"reference")]//text()').extract()).strip()
        # Drop the trailing "Google Scholar / CrossRef" link text from the citation.
        del_s = ''.join(div.xpath('.//div[contains(@id,"reference")]/a[@class="ref-link"]//text()').extract()).strip()
        ref_one["refer_text_raw"] = refer_text.replace(del_s, '').strip()
        ref_one["refer_text_site"] = refer_text.replace(del_s, '').strip()
        ref_author_list = list()
        for span in div.xpath('.//div[contains(@id,"reference")]/span[@class="string-name"]'):
            ref_author_list.append(''.join(span.xpath('.//text()').extract()).strip())
        ref_one["author"] = ';'.join(ref_author_list)
        ref_one["title"] = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="article-title"]//text()').extract()).strip()
        ref_one["strtype"] = ''
        ref_one["source_name"] = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="source"]//text()').extract()).strip()
        ref_one["pub_year"] = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="year"]//text()').extract()).strip()
        ref_one["vol"] = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="volume"]//text()').extract()).strip()
        ref_one["num"] = ''
        ref_doi = ''.join(div.xpath('.//div[contains(@id,"reference")]/a[@class="uri"]/@href').extract()).strip()
        ref_one["doi"] = ref_doi.split('/')[-1]
        begin_page = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="fpage"]//text()').extract()).strip()
        end_page = ''.join(
            div.xpath('.//div[contains(@id,"reference")]/span[@class="lpage"]//text()').extract()).strip()
        ref_one["begin_page"] = begin_page
        ref_one["end_page"] = end_page
        ref_one["page_info"] = f'{begin_page}-{end_page}' if begin_page else ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def pnasjournal_article_etl_callback(callmodel) -> EtlDealModel:
    """ETL callback for a PNAS journal-article detail page.

    Parses the downloaded article HTML (``para_dicts['data']['1_1']['html']``)
    plus the crawler-supplied ``article_info_json`` into two save rows:

    * ``oversea_meta_latest`` -- article metadata.
    * ``oversea_ref_latest``  -- the article's reference list (raw citation
      text only; PNAS markup is not split into fields here).

    :param callmodel: callback model; ``sql_model`` carries ``rawid`` and
        ``article_info_json`` (must contain ``art_href`` / ``journal_rawid``).
    :return: ``EtlDealModel`` with ``save_data`` populated.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # NOTE(review): '00146' is also used by cambridgejournal_article_etl_callback.
    # If sub_db_id is meant to be unique per source, lngids can collide — confirm
    # the intended id for PNAS.
    sub_db_id = '00146'
    product = 'PNAS'
    provider = 'NAS'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'pnasjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "US"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//div[@class="core-container"]/h1//text()').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = f'https://www.pnas.org{article_json["art_href"]}'
    data['down_cnt'] = ''
    data['issn'] = '1091-6490'
    data['eissn'] = ''
    data['cnno'] = ''
    pub_date = ''.join(res.xpath('//meta[@name="citation_online_date"]/@content').extract()).strip()
    data['pub_date'] = clean_pubdate(pub_date)
    # Derive the year from the cleaned date, consistent with the other *_etl callbacks.
    pub_year = data['pub_date'][:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['publisher'] = ''.join(res.xpath('//meta[@name="citation_publisher"]/@content').extract()).strip()
    data['vol'] = ''.join(res.xpath('//meta[@name="citation_volume"]/@content').extract()).strip()
    data['num'] = ''.join(res.xpath('//meta[@name="citation_issue"]/@content').extract()).strip()
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = ''.join(res.xpath('//meta[@name="citation_journal_title"]/@content').extract()).strip()
    data['journal_name_alt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="citation_doi"]/@content').extract()).strip()
    oa_info = ''.join(
        res.xpath('//div[@class="meta-panel__access meta-panel__access--open"]/span//text()').extract()).strip()
    data['is_oa'] = '1' if oa_info == 'Open access' else '0'
    data['begin_page'] = ''
    data['end_page'] = ''
    author_list = list()
    email_list = list()
    corr_author_list = list()
    organ_list = list()
    orc_id_list = list()
    for div in res.xpath('//section[@class="core-authors"]/div'):
        given_name = ' '.join(div.xpath('.//span[@property="givenName"]//text()').extract()).strip()
        family_name = ' '.join(div.xpath('.//span[@property="familyName"]//text()').extract()).strip()
        # BUGFIX: join given and family name with a space (the old code
        # concatenated them directly, producing e.g. "JohnSmith").
        author = f'{given_name} {family_name}'.strip()
        orc_href = ''.join(div.xpath('.//h5/a[@class="orcid-id"]/@href').extract()).strip()
        if orc_href:
            orc_id_list.append(f'{orc_href.split("/")[-1]}@{author}')
        s = author
        # De-duplicate affiliations; the author keeps the 1-based organ indices.
        for organ_div in div.xpath('.//div[@class="affiliations"]/div'):
            organ = ''.join(organ_div.xpath('span//text()').extract()).strip()
            if organ not in organ_list:
                organ_list.append(organ)
            index = organ_list.index(organ)
            s = s + f'[{index + 1}]'
        author_list.append(s)
        email_info = ''.join(div.xpath('.//h5/a[@property="email"]//text()').extract()).strip()
        if email_info:
            corr_author_list.append(author)
            email_list.append(f'{email_info}:{author}')
    data['author'] = ';'.join(author_list).replace('[]', '')
    # Author bios are not parsed from PNAS pages (the old intro list was never filled).
    data['author_intro'] = ''
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join([f'[{i + 1}]{v}' for i, v in enumerate(organ_list)])
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ';'.join(orc_id_list)
    data['email'] = ';'.join(email_list)
    data['keyword'] = ''.join(res.xpath('//meta[@name="keywords"]/@content').extract()).strip().replace(',', ';')
    data['keyword_alt'] = ''
    data['abstract'] = ' '.join(
        res.xpath('//section[@id="abstract"]/div//text()').extract()
    ).strip().replace('\n', ' ').replace('\r', ' ')
    data['abstract_alt'] = ''
    data['recv_date'] = ''
    data['accept_date'] = ''
    data['raw_type'] = ''.join(res.xpath('//meta[@name="citation_article_type"]/@content').extract()).strip()
    data['fund'] = ''
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    num = 0
    for div in res.xpath('//section[@id="bibliography"]//div[@class="citations"]'):
        num += 1
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        # Per-reference lngid = article lngid + zero-padded sequence number.
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        refer_text = ''.join(div.xpath('div[@class="citation"]//text()').extract()).strip()
        ref_one["refer_text_raw"] = refer_text
        ref_one["refer_text_site"] = refer_text
        # Structured fields are intentionally left empty: only the raw citation
        # text is captured for PNAS.
        ref_one["author"] = ''
        ref_one["title"] = ''
        ref_one["strtype"] = ''
        ref_one["source_name"] = ''
        ref_one["pub_year"] = ''
        ref_one["vol"] = ''
        ref_one["num"] = ''
        ref_one["doi"] = ''
        ref_one["begin_page"] = ''
        ref_one["end_page"] = ''
        ref_one["page_info"] = ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def frontiersinjournal_article_etl_callback(callmodel) -> EtlDealModel:
    """Parse a downloaded Frontiers (frontiersin.org) article page into ETL rows.

    Reads the raw HTML from ``callmodel.para_dicts['data']['1_1']['html']`` and
    the crawl-stage JSON in ``callmodel.sql_model.article_info_json``, and
    returns an :class:`EtlDealModel` whose ``save_data`` holds one record for
    the ``oversea_meta_latest`` table and one for ``oversea_ref_latest``.
    """
    result = EtlDealModel()
    save_data = list()

    html = callmodel.para_dicts['data']['1_1']['html']
    res = Selector(text=html)
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for this source (Frontiers journals).
    sub_db_id = '00146'
    product = 'FRONTIERSIN'
    provider = 'FRONTIERS'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'frontiersinjournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "CH"
    data["language"] = "EN"
    data['title'] = ' '.join(res.xpath('//div[@class="JournalAbstract"]/h1//text()').extract()).strip()
    data['title_alt'] = ''
    data['provider_url'] = article_json["art_href"]
    data['down_cnt'] = ''
    data['issn'] = ''.join(res.xpath('//meta[@name="citation_issn"]/@content').extract()).strip()
    data['eissn'] = ''
    data['cnno'] = ''
    pub_date = ''.join(res.xpath('//meta[@name="citation_publication_date"]/@content').extract()).strip()
    data['pub_date'] = clean_pubdate(pub_date)
    # Derive the year from the normalized YYYYMMDD value, not from the raw
    # meta string, so non-digit prefixes in the raw date cannot leak through.
    pub_year = data['pub_date'][:4]
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['publisher'] = ''.join(res.xpath('//meta[@name="citation_publisher"]/@content').extract()).strip()
    data['vol'] = ''.join(res.xpath('//meta[@name="citation_volume"]/@content').extract()).strip()
    data['num'] = ''
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = ''.join(res.xpath('//meta[@name="citation_journal_title"]/@content').extract()).strip()
    data['journal_name_alt'] = ''
    data['doi'] = ''.join(res.xpath('//meta[@name="citation_doi"]/@content').extract()).strip()
    data['is_oa'] = ''
    data['begin_page'] = ''
    data['end_page'] = ''
    author_list = list()
    corr_author_list = list()
    organ_list = list()
    orc_id_list = list()
    author_intro_list = list()
    # The author block is flattened to text with '$$' joiners; each author
    # chunk looks like "Name$$1,2$$*" where the numbers are affiliation
    # indices and a trailing '*' marks a corresponding author.
    author_text = '$$'.join(res.xpath('//div[@class="authors"]//text()').extract()).strip().strip("$$")
    for author_info in author_text.split('$$ '):
        author = author_info.strip().strip('$$').split('$$', 1)[0]
        if '$$*' in author_info:
            corr_author_list.append(author)
        num_info = ''
        for i in author_info.strip().strip('$$').split('$$', 1)[-1].replace('$$*', '').replace('$$†', '').split(','):
            num_info += f'[{i}]'
        author_list.append(f'{author}{num_info}')
    for li in res.xpath('//ul[@class="notes"]/li'):
        organ = ''.join(li.xpath('text()').extract()).strip()
        index = ''.join(li.xpath('span//text()').extract()).strip()
        organ_list.append(f'[{index}]{organ}')
    email_author = ''.join(
        res.xpath('//span[contains(text(),"*Correspondence:")]/parent::p[1]/text()').extract()).strip()
    email_info = ''.join(
        res.xpath('//span[contains(text(),"*Correspondence:")]/parent::p[1]/a/text()').extract()).strip()
    email_s = ''
    if email_info:
        email_s = f'{email_info}:{email_author}'.strip(',')
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_intro'] = ';'.join(author_intro_list)
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join(organ_list)
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ';'.join(orc_id_list)
    data['email'] = email_s
    data['keyword'] = ''.join(res.xpath('//meta[@name="citation_keywords"]/@content').extract()).strip()
    data['keyword_alt'] = ''
    data['abstract'] = ' '.join(res.xpath('//meta[@name="citation_abstract"]/@content').extract()).strip()
    data['abstract_alt'] = ''
    # Timestamps render as "Received: 01 January 2020; Accepted: 02 February 2020; ...".
    date_text = ''.join(res.xpath('//p[@id="timestamps"]//text()').extract()).strip()
    data['recv_date'] = ''
    data['accept_date'] = ''
    for date_info in date_text.split(';'):
        if 'Received:' in date_info:
            date = date_info.replace('Received:', '').strip()
            data['recv_date'] = datetime.datetime.strptime(date, "%d %B %Y").strftime("%Y%m%d")
        if 'Accepted:' in date_info:
            date = date_info.replace('Accepted:', '').strip()
            data['accept_date'] = datetime.datetime.strptime(date, "%d %B %Y").strftime("%Y%m%d")
    data['raw_type'] = ''
    data['fund'] = ''
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    # One reference record per citation block; each reference's lngid is the
    # article lngid plus a 1-based 4-digit sequence number.
    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    for num, div in enumerate(res.xpath('//div[@class="References"]'), start=1):
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        refer_text = ''.join(div.xpath('p[@class="ReferencesCopy1"]//text()').extract()).strip()
        ref_one["refer_text_raw"] = refer_text
        ref_one["refer_text_site"] = refer_text
        ref_one["author"] = ''
        ref_one["title"] = ''
        ref_one["strtype"] = ''
        ref_one["source_name"] = ''
        ref_one["pub_year"] = ''
        ref_one["vol"] = ''
        ref_one["num"] = ''
        # When present, the DOI trails the citation text as "doi: 10.x/...".
        doi = ''
        if 'doi:' in refer_text:
            doi = refer_text.split('doi:')[-1].strip()
        ref_one["doi"] = doi
        ref_one["begin_page"] = ''
        ref_one["end_page"] = ''
        ref_one["page_info"] = ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result


def ieeejournal_article_etl_callback(callmodel) -> EtlDealModel:
    """Parse an IEEE Xplore article page into ETL rows.

    The crawler stores a JSON envelope in
    ``callmodel.para_dicts['data']['1_1']['html']`` containing the page HTML
    (with an embedded ``...document.metadata={...};`` JSON blob) and a
    ``references`` JSON string. Returns an :class:`EtlDealModel` whose
    ``save_data`` holds one ``oversea_meta_latest`` record and one
    ``oversea_ref_latest`` record.
    """
    result = EtlDealModel()
    save_data = list()

    html_s = callmodel.para_dicts['data']['1_1']['html']
    html_j = json.loads(html_s)
    html = html_j['html']
    # The metadata JSON is inlined in a script as ".document.metadata={...};"
    # — the non-greedy match stops at the first "};", so re-append the brace.
    info_s = re.findall(r'\.document\.metadata=(.*?)};', html)[0] + '}'
    info_json = json.loads(info_s)
    references_json = json.loads(html_j['references'])
    article_json = json.loads(callmodel.sql_model.article_info_json)

    data = dict()
    down_date_str = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Fixed identifiers for this source (IEEE journals).
    sub_db_id = '00369'
    product = 'IEEE'
    provider = 'IEEE'
    sub_db = 'QK'
    rawid = callmodel.sql_model.rawid
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data['rawid'] = rawid
    data['rawid_mysql'] = rawid
    data['lngid'] = lngid
    data['keyid'] = lngid
    data['product'] = product
    data['sub_db'] = sub_db
    data['sub_db_id'] = sub_db_id
    data['provider'] = provider
    data['zt_provider'] = 'ieeejournal'
    data['source_type'] = '3'
    data['latest_date'] = down_date_str[:8]
    data['down_date'] = down_date_str[:8]
    data['batch'] = down_date_str
    data['vision'] = '1'
    data['is_deprecated'] = '0'
    data["country"] = "US"
    data["language"] = "EN"
    data['title'] = info_json['displayDocTitle']
    data['title_alt'] = ''
    data['provider_url'] = f'https://ieeexplore.ieee.org/document/{info_json["articleId"]}'
    data['down_cnt'] = ''
    issn = ''
    eissn = ''
    for issn_i in info_json['issn']:
        if 'Print ISSN' == issn_i['format']:
            issn = issn_i['value']
        if 'Electronic ISSN' == issn_i['format']:
            eissn = issn_i['value']
    data['issn'] = issn
    data['eissn'] = eissn
    data['cnno'] = ''
    # displayPublicationDate is day-first text like "5 June 2020"; parse it
    # once and take BOTH the normalized date and the year from the parsed
    # value (slicing the raw string, pub_date[:4], would yield "5 Ju").
    pub_dt = datetime.datetime.strptime(info_json['displayPublicationDate'], "%d %B %Y")
    data['pub_date'] = pub_dt.strftime("%Y%m%d")
    pub_year = pub_dt.strftime("%Y")
    data['pub_year'] = pub_year
    data['pub_place'] = ''
    data['publisher'] = info_json['publisher']
    data['vol'] = info_json['volume']
    data['num'] = info_json.get('issue', '')
    data['journal_raw_id'] = article_json['journal_rawid']
    data['journal_name'] = article_json['journal_name']
    data['journal_name_alt'] = ''
    data['doi'] = info_json.get('doi', '')
    data['is_oa'] = 1 if info_json.get('isOpenAccess', '') == 'true' else ''
    data['begin_page'] = info_json.get('startPage', '')
    data['end_page'] = info_json.get('endPage', '')
    author_list = list()
    corr_author_list = list()
    organ_list = list()
    orc_id_list = list()
    author_intro_list = list()
    for author_info in info_json['authors']:
        author = author_info['name']
        orcid = author_info.get('orcid', '')
        if orcid:
            orc_id_list.append(f'{orcid}@{author}')
        s = f'{author}'
        for organ in author_info['affiliation']:
            # De-duplicate affiliations; the 1-based list position becomes
            # the bracketed index appended to each author name.
            if organ not in organ_list:
                organ_list.append(organ)
            s += f'[{organ_list.index(organ) + 1}]'
        author_list.append(s)
        author_intro = ''.join(author_info.get('bio', {}).get('p', []))
        if author_intro:
            author_intro_list.append(author_intro)
    data['author'] = ';'.join(author_list).replace('[]', '')
    data['author_intro'] = ';'.join(author_intro_list)
    data['author_1st'] = re.sub(r'\[.*?\]', '', author_list[0]) if author_list else ''
    data['organ'] = ';'.join([f'[{i + 1}]{v}' for i, v in enumerate(organ_list)])
    data['corr_author'] = ';'.join(corr_author_list)
    data['orc_id'] = ';'.join(orc_id_list)
    data['email'] = ''
    keyword_list = list()
    for keyword_i in info_json['keywords']:
        keyword_list.extend(keyword_i['kwd'])
    data['keyword'] = ';'.join(keyword_list)
    data['keyword_alt'] = ''
    data['abstract'] = info_json['abstract']
    data['abstract_alt'] = ''
    data['recv_date'] = ''
    data['accept_date'] = ''
    data['raw_type'] = ''
    fund_list = list()
    for fund_i in info_json['fundingAgencies']['fundingAgency']:
        # Render as "Name(GrantId)" when a grant id is present.
        if fund_i.get("fundingId", ''):
            fund_s = f'{fund_i["fundingName"]}({fund_i["fundingId"]})'
        else:
            fund_s = fund_i["fundingName"]
        fund_list.append(fund_s)
    data['fund'] = ';'.join(fund_list)
    data['column_info'] = ''
    save_data.append({'table': 'oversea_meta_latest', 'data': data})

    # One reference record per entry in the references JSON; each reference's
    # lngid is the article lngid plus a 1-based 4-digit sequence number.
    ref_data = dict()
    ref_data['lngid'] = lngid
    ref_data['keyid'] = lngid
    ref_data['sub_db_id'] = sub_db_id
    ref_data['source_type'] = '3'
    ref_data['latest_date'] = down_date_str[:8]
    ref_data['batch'] = down_date_str
    ref_data['is_deprecated'] = '0'
    ref_data['pub_year'] = pub_year
    refer_info = list()
    ref_id_list = list()
    for num, item in enumerate(references_json['references'], start=1):
        ref_one = dict()
        ref_one["is_deprecated"] = "0"
        ref_one["batch"] = down_date_str
        ref_one["sub_db_id"] = sub_db_id
        ref_one["product"] = product
        ref_one["sub_db"] = sub_db
        ref_one["provider"] = provider
        ref_one["down_date"] = down_date_str[:8]
        ref_one["cited_rawid"] = rawid
        ref_one["cited_lngid"] = lngid
        ref_lngid = "{}{}".format(lngid, str(num).zfill(4))
        ref_id_list.append(ref_lngid)
        ref_one["lngid"] = ref_lngid
        ref_one["keyid"] = ref_lngid
        # Reference text may contain inline HTML tags; strip them.
        refer_text = re.sub(r'<.*?>', '', item['text'])
        ref_one["refer_text_raw"] = refer_text
        ref_one["refer_text_site"] = refer_text
        ref_one["author"] = ''
        ref_one["title"] = item.get('title', '')
        ref_one["strtype"] = ''
        ref_one["source_name"] = ''
        ref_one["pub_year"] = ''
        ref_one["vol"] = ''
        ref_one["num"] = ''
        # The cross-ref link carries the DOI as a https://doi.org/ URL.
        doi = ''
        link = item.get('links', '')
        if link:
            doi = link.get('crossRefLink', '').replace('https://doi.org/', '')
        ref_one["doi"] = doi
        ref_one["begin_page"] = ''
        ref_one["end_page"] = ''
        ref_one["page_info"] = ''
        refer_info.append(ref_one)
    ref_data['ref_id'] = ';'.join(ref_id_list)
    ref_data['ref_cnt'] = str(len(ref_id_list))
    ref_data['refer_info'] = refer_info
    save_data.append({'table': 'oversea_ref_latest', 'data': ref_data})

    result.save_data = save_data
    return result
