import base64
import json
import re
import sqlite3
import string
import time
import traceback
import zipfile

import aiofiles

from fastapi import APIRouter, Request
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basedir import BaseDir
from re_common.baselibrary.utils.basefile import BaseFile
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.baselibrary.utils.core.requests_core import MsgCode, INSIDE_HEADERS
from starlette.responses import FileResponse

from apps.allsubdb.proxies_control.models import PostProxy
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import ReturnInfo, FAILED, SUCCESS
from apps.crawler_platform.core_api.models import UpdateTaskModel
from apps.crawler_platform.core_platform.core_g import SQLTable, CoreSqlValue
from apps.crawler_platform.core_platform.core_sql import CoreSqlMixin
from apps.crawler_platform.core_platform.g_model import InputPlatformModel, journalInputMode, journalIssueMode, \
    journalArticleMode, journalHomeMode, journalVolumeMode, SaveSqlite3, FileGetFromWeb, JournalArticleMode, SaveMdb, \
    ChaoXingIssueHtml, GetCommanderHtml
from apps.crawler_platform.qk_platform.journal_article_down import JournalArtilceDownStep
from apps.crawler_platform.qk_platform.journal_etl_local import JournalEtlLocal
from apps.crawler_platform.qk_platform.journal_etl_remote import JournalEtlRemote
from apps.crawler_platform.qk_platform.journal_home import JournalHomeDownStep
from apps.crawler_platform.qk_platform.journal_issue_down import JournalIssueDownStep
from apps.crawler_platform.qk_platform.journal_qk_down import JournalDownStep
from apps.crawler_platform.qk_platform.journal_volume_down import VolumeDownStep
from apps.crawler_platform.util.requestapihelper import RequestApiHelper
from apps.sql_app.mmongodb import Coll
from settings import get_settings, URLDISTRIBUTED

# Module-level router for all journal-crawl endpoints below.  ContextIncludedRoute
# is a custom route class (presumably attaches request context to each handler —
# see apps.core.m_route; confirm there).
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/journal/downhome")
async def downhomejournal(request: Request, input: InputPlatformModel[journalHomeMode]):
    """
    下载期刊首页
    :return:
    """

    return_info = ReturnInfo()
    down = JournalHomeDownStep(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}

    return return_info.todict()


@router.post("/journal/downjournal")
async def downjournal(request: Request, input: InputPlatformModel[journalInputMode]):
    """
    下载期刊列表
    :return:
    """
    return_info = ReturnInfo()
    down = JournalDownStep(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}

    return return_info.todict()


@router.post("/journal/downvolume")
async def downvolume(request: Request, input: InputPlatformModel[journalVolumeMode]):
    """
    下载期刊列表
    :return:
    """
    return_info = ReturnInfo()
    down = VolumeDownStep(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}

    return return_info.todict()


@router.post("/journal/downissue")
async def downjournal(request: Request, input: InputPlatformModel[journalIssueMode]):
    """
    下载期刊列表
    :return:
    """
    return_info = ReturnInfo()
    down = JournalIssueDownStep(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}

    return return_info.todict()


@router.post("/journal/downarticle")
async def downjournal(request: Request, input: InputPlatformModel[journalArticleMode]):
    """
    下载期刊列表
    :return:
    """
    return_info = ReturnInfo()
    down = JournalArtilceDownStep(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}
    return return_info.todict()


@router.post("/journal/etl/local")
async def journal_etl_local(request: Request, input: InputPlatformModel[journalArticleMode]):
    """
    期刊本地etl
    :return:
    """
    return_info = ReturnInfo()
    down = JournalEtlLocal(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}
    return return_info.todict()


@router.post("/journal/etl/remote")
async def journal_etl_remote(request: Request, input: InputPlatformModel[JournalArticleMode]):
    """
    期刊远程etl
    :return:
    """
    return_info = ReturnInfo()
    down = JournalEtlRemote(request, input)
    bools, err_info = await down.my_task()
    step_info = down.step_info
    return_info.status = bools_string(bools)
    if bools:
        return_info.msg_code = MsgCode.SUCCESS_CODE
        return_info.msg = step_info
        return_info.data = ""
    else:
        return_info.msg_code = 400
        return_info.msg = step_info
        return_info.data = {"err_info": err_info}
    return return_info.todict()


async def get_proxy(task_name, task_tag):
    """
    Fetch a proxy from the proxy-control service for the given task.

    :param task_name: task name registered with the proxy service.
    :param task_tag: task tag registered with the proxy service.
    :return: (True, proxy) on success, (False, 'nothing') on failure.
    """
    payload = InputPlatformModel(
        data=PostProxy(task_name=task_name, task_tag=task_tag)
    ).json()
    rrq = RequestApiHelper.get_rrq()
    ok, _ = await RequestApiHelper.etl_remote_meta(
        rrq=rrq,
        header=INSIDE_HEADERS,
        url=get_settings().PROXY_URL,
        timeout=30,
        data=payload,
        moths=MRequest.POST,
        middler_list=[rrq.status_code_middlerwares, rrq.is_none_html_middlerwares,
                      rrq.is_null_html_middlerwares, rrq.msg_status_code_middlerwares],
    )
    if not ok:
        return ok, 'nothing'
    return ok, json.loads(rrq.html)["data"]


async def chaoxing_post_cookie(cookie):
    """
    Push a freshly obtained chaoxing Cookie header into every chaoxing task
    config via the update-task service.

    :param cookie: the cookie string to store under the tasks' request headers.
    :return: True only if every task update succeeded, otherwise the first
             failing call's boolean result.
    """
    basekey_list = [
        {"task_name": "chaoxingjournal", "task_tag": "chaoxingjournallist", "order_num": 1, "groups": "1"},
        {"task_name": "chaoxingjournal", "task_tag": "chaoxingqkhomeclass", "order_num": 1, "groups": "1"},
        {"task_name": "chaoxingjournal", "task_tag": "chaoxingissue", "order_num": 1, "groups": "1"},
    ]
    upset = [{"key": "headers", "value": {"key": "Cookie", "value": cookie}, "type": "dict", "flag": "False"}]

    for basekey in basekey_list:
        payload = InputPlatformModel(
            data=UpdateTaskModel(basekey=basekey, upset=upset)
        ).json()
        rrq = RequestApiHelper.get_rrq()
        ok, _ = await RequestApiHelper.etl_remote_meta(
            rrq=rrq,
            header=INSIDE_HEADERS,
            url=get_settings().UPDATE_TASK,
            timeout=30,
            data=payload,
            moths=MRequest.POST,
            middler_list=[rrq.is_null_html_middlerwares, rrq.status_code_middlerwares],
        )
        if not ok:
            return ok
    return True


@router.post("/chaoxingjournal/get_cookie")
async def get_cookie(input: InputPlatformModel[journalInputMode]):
    """
        获取超星cookie 接收期刊ID参数 进行对应请求,以防止对单个URL请求过多导致被封风险
    """
    return_info = ReturnInfo()
    pa_list = list()
    rule1 = re.compile("'Set-Cookie': \'(.*?)\'")
    rule2 = re.compile(";.*")
    bools, proxies = await get_proxy("chaoxing_cookie", "cx_cookie")
    journal_rawid = input.data.journal_rawid
    if bools:
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            "Host": "qikan.chaoxing.com",
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'

        }
        url = "https://qikan.chaoxing.com/mag/infos?mags=%s" % (journal_rawid)

        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": headers,
            "url": url,
            "timeout": 30,
            "proxy": proxies,
            "moths": MRequest.GET,
            "middler_list": [],
            "allow_redirects": False
        }
        boolsg, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if boolsg:
            cookie1_str = rrq.resp.headers
            cookie1_str_list = re.findall(rule1, str(cookie1_str))
            for i in cookie1_str_list:
                pa = rule2.sub("", i)
                pa_list.append(pa)
            cookie1 = ";".join(pa_list)
            pa_list.clear()
            headers["Host"] = "fxlogin.chaoxing.com"
            headers["Cookie"] = "cookiecheck=true"
            if "Location" in str(cookie1_str):
                url = cookie1_str['Location']
            else:
                url = ""
            rrq.set_url(url).set_sn(None) \
                .set_timeout(30).set_allow_redirects(False) \
                .set_proxy(proxies).set_header(headers).set_middler_list([])
            boolsg2, dicts2 = await rrq.run(MRequest.GET)
            if boolsg2:
                cookie2_str = rrq.resp.headers
                cookie2_str_list = re.findall(rule1, str(cookie2_str))
                for i in cookie2_str_list:
                    if not i.startswith("JSESSIONID"):
                        pa = rule2.sub("", i)
                        pa_list.append(pa)
                cookie2 = ";".join(pa_list)
                cookie = ";".join([cookie1, cookie2])
                bools_post = await chaoxing_post_cookie(cookie)
                if bools_post:
                    return_info.status = SUCCESS
                    return_info.msg_code = MsgCode.SUCCESS_CODE
                    return_info.msg = "cookie请求成功"
                    return_info.data = {"Cookie": cookie}
                    return return_info.todict()
                else:
                    return_info.status = FAILED
                    return_info.msg_code = MsgCode.CHAOXIN_COOKIE_ERROR
                    return_info.msg = "请求存储API失败"
                    return_info.data = ""
                    return return_info.todict()
            else:
                return_info.status = FAILED
                return_info.msg_code = MsgCode.CHAOXIN_COOKIE_ERROR
                return_info.msg = "代理请求二级目标网页失败"
                return_info.data = ""
                return return_info.todict()

        else:
            return_info.status = FAILED
            return_info.msg_code = MsgCode.CHAOXIN_COOKIE_ERROR
            return_info.msg = "代理请求一级目标网页失败"
            return_info.data = ""
            return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg_code = MsgCode.CHAOXIN_COOKIE_ERROR
        return_info.msg = "代理获取失败"
        return_info.data = ""
        return return_info.todict()


async def cookie_rule(cookie_str):
    """
    Extract the JSESSIONID, route and __dxca fragments from a raw cookie
    string and re-join them with ';'.  Fragments that are absent are skipped;
    the output order is always JSESSIONID, route, __dxca.  Each fragment must
    be terminated by ';' in the input to be matched.
    """
    patterns = (
        re.compile(r"(JSESSIONID=.*?);"),
        re.compile(r"(route=.*?);"),
        re.compile(r"(__dxca=.*?);"),
    )
    kept = []
    for pattern in patterns:
        hits = pattern.findall(cookie_str)
        if hits:
            kept.append(hits[0])
    return ";".join(kept)


@router.post("/chaoxingjournal/dama")
async def get_cookie(input: InputPlatformModel[journalInputMode]):
    """
        超星打码
    """
    return_info = ReturnInfo()
    url = "https://fxlogin.chaoxing.com/antispiderShowVerify.ac"
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        "Host": "fxlogin.chaoxing.com",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'

    }
    proxies = "192.168.31.176:8019"
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": headers,
        "url": url,
        "timeout": 30,
        "proxy": proxies,
        "moths": MRequest.GET,
        "middler_list": [],
        "allow_redirects": True
    }
    pa_list = list()
    boolsg, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    if boolsg:
        header_first = rrq.resp.headers
        rule1 = re.compile("'Set-Cookie': \'(.*?)\'")
        rule2 = re.compile(";.*")

        cookie1_str_list = re.findall(rule1, str(header_first))
        for i in cookie1_str_list:
            pa = rule2.sub("", i)
            pa_list.append(pa)
        cookie1 = ";".join(pa_list)
        png_url = "https://fxlogin.chaoxing.com/processVerifyPng.ac"
        header_png = {
            'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
            'Host': 'fxlogin.chaoxing.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Cookie': cookie1
        }
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": header_png,
            "url": png_url,
            "timeout": 30,
            "proxy": proxies,
            "moths": MRequest.GET,
            "middler_list": [],
            "allow_redirects": True
        }
        boolsp, dictsp = await RequestApiHelper.etl_remote_meta(**kwargs)
        if boolsp:

            # async with aiofiles.open("1111.png", mode="wb") as f:
            #     await f.write(rrq.html_bytes)
            # 怎么用不清楚, 先把结构搭起来
            content = rrq.html_bytes
            base64_data = base64.b64encode(content)
            b64 = base64_data.decode()
            data = {"username": "xujiang", "password": "xu1994323", "typeid": 7, "image": b64}
            rrq = RequestApiHelper.get_rrq()
            kwargs = {
                "rrq": rrq,
                "header": header_png,
                "url": "http://api.ttshitu.com/predict",
                "timeout": 30,
                "proxy": proxies,
                "data": data,
                "moths": MRequest.POST,
                "middler_list": [],
            }
            boolsp1, dictsp1 = await RequestApiHelper.etl_remote_meta(**kwargs)
            if boolsp1:
                text = json.loads(rrq.html)
                if text['success']:
                    vcode = text["data"]["result"]


@router.get("/export/wanfang_qk_info")
async def export_wanfang():
    return_info = ReturnInfo()
    task_name = "wanfangjournal"
    task_tag = "wanfangjournallist"
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    code_publisher_dict = dict()
    for row in code_publisher_list:
        code_publisher_dict[row["publisher"]] = row["country"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            journal_rawid = info["journal_rawid"]
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            last_year1 = row['last_year']
            other_dicts_1 = info["other_dicts"]
            other_dicts_2 = json.loads(other_dicts_1)
            last_year2 = BaseDicts.is_dict_exit_key(other_dicts_2, "last_year")
            if last_year1 != last_year2:
                last_year = last_year2
            else:
                last_year = last_year1
            if len(last_year) == 0:
                # 出现情况, 外围list提供了这本刊, 但是内部下架, 因此和孔老师讨论后, 决定这样的情况跳过
                continue
            collect_database = row.get("collect_database")
            if not collect_database:
                collect_database = other_dicts_2.get("collect_database", "")
            data["journal_name"] = row['qk_name']
            trans_title = row['trans_title'].strip()
            lst = trans_title.split('  ')
            data["journal_name_alt"] = lst[0]
            journal_name_korea = ''
            if len(lst) == 2:
                journal_name_korea = lst[1]
            data["journal_name_korea"] = journal_name_korea
            data["issn"] = row['issn'].replace("ISSN","").strip()
            data["cnno"] = row['cn'].replace("CN","").strip()
            data["publisher"] = row['hostunit_name']
            publisher_list = data["publisher"].split(";")
            country = "CN"
            for k, v in code_publisher_dict.items():
                if k in publisher_list:
                    country = v
                    break
                elif k in data["publisher"]:
                    country = v
                    break
            data["director_dept"] = row['dep_name']
            data["type_name"] = row['type_name']  # row['publish_cycle']
            data["award_state"] = row['award_state']
            data["collect_database"] = collect_database
            language = row['language']
            if language == 'c;h;i':
                language = '中文'
            elif language == 'e;n;g':
                language = '英文'
            data["language"] = language.replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            wf_impact = ''
            if row['wf_impact'] != '' and float(row['wf_impact']) > 0:
                wf_impact = '%.2f' % (float(row['wf_impact']))
                wf_impact = '%s@%s' % (str(wf_impact), info['update_time'].strftime('%Y%m%d'))
            data["wf_impact"] = wf_impact
            data["chief_editor"] = row['chief_editor']
            data["journal_name_used"] = row['former_name']
            data["journal_intro"] = row['perio_desc']
            data["post_code"] = row['post_code']
            data["is_stop"] = row['is_stop']
            data["email"] = row['email']
            data["edit_office_addr"] = row['edit_office_addr']
            data["tel_code"] = row['tel_code']
            data["web_site"] = row['web_site']
            data["fax"] = row['fax']
            data["subject"] = info['subject'].lstrip(";")
            data["collect_newyear"] = last_year
            provider_url = f"WF@https://sns.wanfangdata.com.cn/perio/{journal_rawid}"
            data["provider_url"] = provider_url
            data_list.append(data)
        if len(data_list) == 0:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            return_info.data = traceback.format_exc()
            return return_info.todict()
        url = get_settings().DB3_EXPORT_URL
        data = InputPlatformModel[SaveSqlite3](
            data=SaveSqlite3(
                task_tag=task_tag,
                data_list=data_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 30,
            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出万方期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
    except:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出万方期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


@router.get("/export/cnki_qk_info")
async def export_cnki():
    import pypyodbc
    return_info = ReturnInfo()
    task_name = "cnkijournal"
    task_tag = "cnkijournallist"
    int_list = ["cnki_exclusive", "individual_issue", "first_launch"]
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    sql = "select DatabaseName, EnglishAbbr from Edit_DatabaseCollectKind"
    type_dict = dict()
    mdb_dir = r"F:\采集工作记录\collect_abbr.accdb"
    connStr = r"Driver={Microsoft Access Driver (*.mdb,*.accdb)};DBQ=%s" % (mdb_dir)
    conn = pypyodbc.win_connect_mdb(connStr)
    curser = conn.cursor()
    rows_mdb = curser.execute(sql)
    key_list = list()
    value_list = list()
    for row_mdb in rows_mdb:
        type_dict[row_mdb[0]] = row_mdb[1]
        key_list.append(row_mdb[0])
        value_list.append(row_mdb[1])

    cnki_dict = {
        "SCI": "SCIE",
        "CA": "CAS",
        "社科基金资助期刊": "SKJJZZ",
        "北大核心": "BDHX",
        "CSCD扩展版": "CSCD_E",
        "Pж(AJ)": "AJ",
        "CSSCI扩展版": "CSSCI_E",
        "卓越期刊": "EAPJ",
        "CSSCI来源集刊": "CSSCI_C",
        "AMI权威": "AMI_A",
        "AMI外文刊权威": "AMI_A",
        "AMI核心": "AMI_C",
        "AMI集刊核心": "AMI_C",
        "AMI外文刊核心": "AMI_C",
        "AMI新刊核心": "AMI_C",
        "AMI职院刊核心": "AMI_C",
        "AMI扩展": "AMI_E",
        "AMI职院刊扩展": "AMI_E",
        "AMI集刊入库": "AMI_S",
        "AMI入库": "AMI_S",
        "AMI外文刊入库": "AMI_S",
        "AMI新刊入库": "AMI_S",
        "AMI职院刊入库": "AMI_S",
        "AMI顶级": "AMI_T",
    }
    cnki_dict_key_list = list()
    for cnki_dict_key, cnki_dict_value in cnki_dict.items():
        cnki_dict_key_list.append(cnki_dict_key)



    code_publisher_dict = dict()
    for row in code_publisher_list:
        code_publisher_dict[row["publisher"]] = row["country"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            special_name_list = list()
            subject_name_list = list()
            journal_rawid = info["journal_rawid"]
            if not journal_rawid:
                continue
            # subjects = info["subject"].lstrip(";")
            # one_data_set = set()
            # if subjects != "":
            #     subject_list = subjects.split(";")
            #     for subject in subject_list:
            #         one_data_set.add(subject)
            #
            #     for data1 in one_data_set:
            #         data_list1 = data1.split("->")
            #         special_name_list.append(data_list1[0])
            #         subject_name_list.append(data_list1[1])
            #     special_name = ";".join(special_name_list)
            #     subject_name = ";".join(subject_name_list)
            # else:
            #     special_name = ""
            #     subject_name = ""
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            data["special_name"] = BaseDicts().is_dict_exit_key(row, "special_name")
            data["subject_name"] = BaseDicts().is_dict_exit_key(row, "subject_name")
            data["journal_name"] = BaseDicts().is_dict_exit_key(row, "journal_name")
            data["journal_name_alt"] = BaseDicts().is_dict_exit_key(row, "journal_name_alt")
            cnki_impact_fh_1 = BaseDicts().is_dict_exit_key(row, "cnki_impact_fh_1")
            if cnki_impact_fh_1:
                data["cnki_impact_fh"] = cnki_impact_fh_1
            else:
                data["cnki_impact_fh"] = BaseDicts().is_dict_exit_key(row, "cnki_impact_fh")
            cnki_impact_zh_1 = BaseDicts().is_dict_exit_key(row, "cnki_impact_zh_1")
            if cnki_impact_zh_1:
                data["cnki_impact_zh"] = cnki_impact_zh_1
            else:
                data["cnki_impact_zh"] = BaseDicts().is_dict_exit_key(row, "cnki_impact_zh")
            data["journal_name_used"] = BaseDicts().is_dict_exit_key(row, "journal_name_used")
            data["publisher"] = BaseDicts().is_dict_exit_key(row, "publisher")
            publisher_list = data["publisher"].split(";")
            # for one_publisher in publisher_list:
            country = "CN"
            journalType = BaseDicts().is_dict_exit_key(row, "journalType")
            journal_type_list = journalType.split(";")
            j_list = list()
            for one_type in journal_type_list:
                if one_type in value_list:
                    j_list.append(one_type)
                elif one_type in key_list:
                    new_value = type_dict[one_type]
                    j_list.append(new_value)
                elif one_type in cnki_dict_key_list:
                    new_value = cnki_dict[one_type]
                    j_list.append(new_value)
                else:
                    j_list.append(one_type)
            data["collect_abbr"] = ";".join(j_list)
            for k, v in code_publisher_dict.items():
                if k in publisher_list:
                    country = v
                    break
                elif k in data["publisher"]:
                    country = v
                    break
            data["type_name"] = BaseDicts().is_dict_exit_key(row, "type_name")
            data["issn"] = BaseDicts().is_dict_exit_key(row, "issn")
            data["cnno"] = BaseDicts().is_dict_exit_key(row, "cnno")
            data["pub_place"] = BaseDicts().is_dict_exit_key(row, "pub_place")
            data["language"] = BaseDicts().is_dict_exit_key(row, "language").replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            data["book_size"] = BaseDicts().is_dict_exit_key(row, "book_size")
            data["sem_code"] = BaseDicts().is_dict_exit_key(row, "sem_code")
            data["create_date"] = BaseDicts().is_dict_exit_key(row, "create_date")
            data["state"] = BaseDicts().is_dict_exit_key(row, "state")
            data["source_db"] = BaseDicts().is_dict_exit_key(row, "source_db")
            data["article_count"] = BaseDicts().is_dict_exit_key(row, "article_count").replace("篇", "")
            data["down_cnt"] = BaseDicts().is_dict_exit_key(row, "down_cnt").replace("次", "")
            data["cited_cnt"] = BaseDicts().is_dict_exit_key(row, "cited_cnt").replace("次", "")
            data["evaluate_info"] = BaseDicts().is_dict_exit_key(row, "evaluate_info")
            data["cnki_exclusive"] = BaseDicts().is_dict_exit_key(row, "cnki_exclusive", default=0)
            data["individual_issue"] = BaseDicts().is_dict_exit_key(row, "individual_issue", default=0)
            data["first_launch"] = BaseDicts().is_dict_exit_key(row, "first_launch", default=0)
            data["collect_newyear"] = BaseDicts().is_dict_exit_key(row, "collect_newyear").replace("None", "")
            provider_url = f"CNKI@https://navi.cnki.net/knavi/journals/{journal_rawid}/detail?uniplatform=NZKPT"
            data["provider_url"] = provider_url

            data_list.append(data)
        if len(data_list) == 0:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            return_info.data = traceback.format_exc()
            return return_info.todict()
        url = get_settings().DB3_EXPORT_URL
        data = InputPlatformModel[SaveSqlite3](
            data=SaveSqlite3(
                task_tag=task_tag,
                data_list=data_list,
                int_list=int_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 30,
            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出cnki期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
    except:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出cnki期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


@router.get("/export/chaoxing_qk_info")
async def export_cnki():
    # 用于定制化处理各种需求
    return_info = ReturnInfo()
    task_name = "chaoxingjournal"
    task_tag = "chaoxingjournallist"
    int_list = ["cnki_exclusive", "individual_issue", "first_launch"]
    basetime = BaseTime()
    batchtime = basetime.get_today_date_strings()
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    code_publisher_dict = dict()
    for row in code_publisher_list:
        code_publisher_dict[row["publisher"]] = row["country"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            journal_rawid = info["journal_rawid"]
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            data["journal_name"] = BaseDicts().is_dict_exit_key(row, "journal_name")
            data["publisher"] = BaseDicts().is_dict_exit_key(row, "publisher")
            publisher_list = data["publisher"].split(";")
            # for one_publisher in publisher_list:
            country = "CN"
            for k, v in code_publisher_dict.items():
                if k in publisher_list:
                    country = v
                    break
                elif k in data["publisher"]:
                    country = v
                    break
            data["issn"] = BaseDicts().is_dict_exit_key(row, "issn")
            data["cnno"] = BaseDicts().is_dict_exit_key(row, "cnno")
            data["research_field"] = BaseDicts().is_dict_exit_key(row, "provider_subject")
            data["language"] = BaseDicts().is_dict_exit_key(row, "language").replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            data["type_name"] = BaseDicts().is_dict_exit_key(row, "type_name")
            cx_impact = BaseDicts().is_dict_exit_key(row, "impact")
            if cx_impact == "":
                data["cx_impact"] = ""
            else:
                data["cx_impact"] = cx_impact + "@" + batchtime
            data["journal_intro"] = BaseDicts().is_dict_exit_key(row, "journal_intro")
            data["collect_newyear"] = BaseDicts().is_dict_exit_key(row, "collect_newyear")
            data["provider_url"] = "http://qikan.chaoxing.com/mag/infos?mags=" + journal_rawid
            cited_cnt = BaseDicts().is_dict_exit_key(row, "cited_cnt")
            data["collect_database"] = BaseDicts().is_dict_exit_key(row, "CollectDatabase")
            if cited_cnt == "0":
                data["cited_cnt"] = ""
            else:
                data["cited_cnt"] = cited_cnt + "@" + batchtime
            data_list.append(data)
        if len(data_list) == 0:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            return_info.data = traceback.format_exc()
            return return_info.todict()

        url = get_settings().DB3_EXPORT_URL
        data = InputPlatformModel[SaveSqlite3](
            data=SaveSqlite3(
                task_tag=task_tag,
                data_list=data_list,
                int_list=int_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 30,

            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出chaoxing期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
    except:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出chaoxing期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


#
# @router.post("/wanfanghome/init")
# async def init_wf_home():
#     return_info = ReturnInfo()
#     lists = []
#     dicts = {
#         "home_rawid": "0/",
#         "task_name": "wanfangjournal",
#         "task_tag": "wanfangclasshome",
#         "sub_db_id": "00004",
#         "page_index": "0",
#         "home_json": "{}",
#     }
#     dicts1 = {
#         "home_rawid": "None",
#         "task_name": "wanfangmed",
#         "task_tag": "wanfangmedhome",
#         "sub_db_id": "00288",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     lists.append(dicts)
#     lists.append(dicts1)
#     for i in string.digits + string.ascii_uppercase:
#         t = dicts.copy()
#         t["home_rawid"] = i
#         t["task_tag"] = "wanfangqkhome"
#         lists.append(t)
#     list_v = []
#     list_key = list(lists[0].keys())
#     for item in lists:
#         list_v.append(tuple(item.values()))
#     bools, dicts = await CoreSqlMixin.insert_many_sql(SQLTable.journal_home, list_key, list_v,
#                                                       insert=CoreSqlValue.replace_it)
#     if not bools:
#         return_info.status = FAILED
#         return_info.msg_code = MsgCode.SQL_INSERT_ERROR
#         return_info.msg = "insert_many_sql 出现错误"
#     else:
#         return_info.status = SUCCESS
#         return_info.msg_code = 200
#         return_info.msg = "insert_many_sql 初始化成功"
#     return return_info.todict()

#
# @router.post("/cnkihome/init")
# async def init_cnki_home():
#     return_info = ReturnInfo()
#     lists = []
#     # 全部期刊 翻页初始化
#     dicts = {
#         "home_rawid": "None",
#         "task_name": "cnkijournal",
#         "task_tag": "cnkiqkhomeinit",
#         "sub_db_id": "00002",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     # 分类期刊初始化
#     dicts1 = {
#         "home_rawid": "None",
#         "task_name": "cnkijournal",
#         "task_tag": "cnkiclasshome",
#         "sub_db_id": "00002",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     # 辑刊初始化
#     dicts2 = {
#         "home_rawid": "None",
#         "task_name": "cnkijournal",
#         "task_tag": "cnkijclasshome",
#         "sub_db_id": "00002",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     dicts3 = {
#         "home_rawid": "None",
#         "task_name": "cnkicfjd",
#         "task_tag": "cnkicfjdhome",
#         "sub_db_id": "00169",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     dicts4 = {
#         "home_rawid": "None",
#         "task_name": "cnkicjfx",
#         "task_tag": "cnkicjfxhome",
#         "sub_db_id": "00451",
#         "page_index": "0",
#         "home_json": "{}",
#     }
#     dicts5 = {
#         "home_rawid": "None",
#         "task_name": "cnkicjfr",
#         "task_tag": "cnkicjfrhome",
#         "sub_db_id": "00452",
#         "page_index": "1",
#         "home_json": '{"parm":"{0}"}',
#     }
#     lists.append(dicts)
#     lists.append(dicts1)
#     lists.append(dicts2)
#     lists.append(dicts3)
#     lists.append(dicts4)
#     lists.append(dicts5)
#
#     list_v = []
#     list_key = list(lists[0].keys())
#     for item in lists:
#         list_v.append(tuple(item.values()))
#     bools, dicts = await CoreSqlMixin.insert_many_sql(SQLTable.journal_home, list_key, list_v,
#                                                       insert=CoreSqlValue.replace_it)
#     if not bools:
#         return_info.status = FAILED
#         return_info.msg_code = MsgCode.SQL_INSERT_ERROR
#         return_info.msg = "insert_many_sql 出现错误"
#     else:
#         return_info.status = SUCCESS
#         return_info.msg_code = 200
#         return_info.msg = "insert_many_sql 初始化成功"
#     return return_info.todict()
#
#
# @router.post("/chaoxinghome/init")
# async def init_cx_home():
#     return_info = ReturnInfo()
#     lists = []
#     dicts = {
#         "home_rawid": "None",
#         "task_name": "chaoxingjournal",
#         "task_tag": "chaoxinghome",
#         "sub_db_id": "00006",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     dicts1 = {
#         "home_rawid": "",
#         "task_name": "chaoxingjournal",
#         "task_tag": "chaoxingqkhomeclass",
#         "sub_db_id": "00006",
#         "page_index": "1",
#         "home_json": "{}",
#     }
#     lists.append(dicts)
#     lists.append(dicts1)
#     list_v = []
#     list_key = list(lists[0].keys())
#     for item in lists:
#         list_v.append(tuple(item.values()))
#     bools, dicts = await CoreSqlMixin.insert_many_sql(SQLTable.journal_home,
#                                                       list_key,
#                                                       list_v,
#                                                       insert=CoreSqlValue.replace_it)
#     if not bools:
#         return_info.status = FAILED
#         return_info.msg_code = MsgCode.SQL_INSERT_ERROR
#         return_info.msg = "insert_many_sql 出现错误"
#     else:
#         return_info.status = SUCCESS
#         return_info.msg_code = 200
#         return_info.msg = "insert_many_sql 初始化成功"
#     return return_info.todict()
#
#
# @router.post("/naturehome/init")
# async def init_naturehome_home():
#     return_info = ReturnInfo()
#     lists = []
#     dicts = {
#         "home_rawid": "None",
#         "task_name": "naturejournal",
#         "task_tag": "naturehome",
#         "sub_db_id": "00035",
#         "page_index": "0",
#         "is_active": "1",
#         "home_json": "{}",
#     }
#     lists.append(dicts)
#     list_v = []
#     list_key = list(lists[0].keys())
#     for item in lists:
#         list_v.append(tuple(item.values()))
#     bools, dicts = await CoreSqlMixin.insert_many_sql(SQLTable.journal_home, list_key, list_v,
#                                                       insert=CoreSqlValue.replace_it)
#     if not bools:
#         return_info.status = FAILED
#         return_info.msg_code = MsgCode.SQL_INSERT_ERROR
#         return_info.msg = "insert_many_sql 出现错误"
#     else:
#         return_info.status = SUCCESS
#         return_info.msg_code = 200
#         return_info.msg = "insert_many_sql 初始化成功"
#     return return_info.todict()


@router.post("/sqlite3/export")
async def export_qk_list(input: InputPlatformModel[SaveSqlite3]):
    # 用于通用型处理
    return_info = ReturnInfo()
    qk_liat_name = input.data.task_tag
    data_list = input.data.data_list
    int_list = input.data.int_list
    # 获取字段名
    field_name_dict = data_list[0]
    field_name_insert_count = len(field_name_dict)

    field_name_list = list()

    for k, v in field_name_dict.items():
        if k in int_list:
            text = f"'{k}' integer"
        else:
            text = f"'{k}' TEXT"
        # if k == "journal_rawid":
        #     text = f"'{k}' TEXT NOT NULL"
        # elif k == "cnki_exclusive":
        #     text = f"'{k}' integer"
        # elif k == "individual_issue":
        #     text = f"'{k}' integer"
        # elif k == "first_launch":
        #     text = f"'{k}' integer"
        # else:
        #     text = f"'{k}' TEXT"
        field_name_list.append(text)
    field_names = ",".join(field_name_list)
    sql_create = f"""CREATE TABLE "journal_info_list" ({field_names});"""

    temp = "..\\fastapi_cjserver_temp"
    if not BaseDir.is_dir_exists(temp):
        BaseDir.create_dir(temp)
    for file in BaseDir.get_dir_all_files(temp):
        if time.time() - BaseFile.get_update_time(file) > 60 * 10:  # 60 * 10
            if BaseFile.is_file_exists(file):
                BaseFile.remove_file(file)
    filepath = '{}\\{}_{}.db3'.format(temp, qk_liat_name, time.time())
    con_file = sqlite3.connect(filepath)
    cur_file = con_file.cursor()
    cur_file.execute(sql_create)
    con_file.commit()
    for data in data_list:
        value_list = list()
        field_name_insert_list = list()
        field_name_value_list = list()
        for key, value in data.items():
            field_name_insert_list.append(key)
            value_list.append(value)
        field_names_insert = ",".join(field_name_insert_list)
        # field_name_value = ",".join(field_name_value_list)

        [field_name_value_list.append("?") for i in range(field_name_insert_count)]
        field_name_value = ",".join(field_name_value_list)
        sql_insert = f"insert into journal_info_list({field_names_insert}) values({field_name_value})"
        values = tuple(value_list)
        cur_file.execute(sql_insert, values)
    con_file.commit()
    cur_file.close()
    zip_name = '{}\\{}_{}.zip'.format(temp, qk_liat_name, time.time())
    zp = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
    zp.write(filepath)
    zp.close()
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "成功导出DB3且压缩成功"
    return_info.data = zip_name
    return return_info


# 这种情况下用get或者post都不影响
@router.get("/getfile/get_one_file")
async def get_one_file(thismodel: FileGetFromWeb):
    return_info = ReturnInfo()
    url = ""
    if thismodel == FileGetFromWeb.chaoxing_list:
        url = get_settings().CHAOXING_DB3
    elif thismodel == FileGetFromWeb.cnki_subject_list:
        url = get_settings().SUBJECT_LIST
    elif thismodel == FileGetFromWeb.cnki_subject_map:
        url = get_settings().SUBJECT_MAP
    elif thismodel == FileGetFromWeb.cnki_list:
        url = get_settings().CNKI_DB3
    elif thismodel == FileGetFromWeb.wanfang_list:
        url = get_settings().WANFANG_DB3

    if url == "":
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "输入参数错误, 请检查"
        return_info.data = traceback.format_exc()
        return return_info.todict()

    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": INSIDE_HEADERS,
        "url": url,
        "timeout": 30,

        "data": "",
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                         rrq.msg_status_code_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    if bools:
        my_dicts = json.loads(rrq.html)
        myfile = my_dicts["data"]
        name = myfile.split("\\")[-1]
        return FileResponse(myfile, filename=name)
    else:
        my_dicts = json.loads(rrq.html)
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = my_dicts["msg"]
        return_info.data = dicts
        return return_info.todict()


@router.get("/cnki/subject_list")
async def export_qk_list():
    return_info = ReturnInfo()
    task_name = "cnkijournal"
    task_tag = "cnkijournallist"
    one_data_set = set()

    # 用于第一个仅有一级学科的TXT
    all_data_set = set()
    all_data_list = list()
    rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)

    for info in rows:
        subjects = info["subject"]
        subjects = subjects.lstrip(";")
        if subjects != "":
            subject_list = subjects.split(";")
            for subject in subject_list:
                one_data_set.add(subject)

            for data in one_data_set:
                data_list = data.split("->")
                all_data_set.add(data_list[0])
    temp = "..\\fastapi_cjserver_temp"
    if not BaseDir.is_dir_exists(temp):
        BaseDir.create_dir(temp)
    for file in BaseDir.get_dir_all_files(temp):
        if time.time() - BaseFile.get_update_time(file) > 60 * 10:  # 60 * 10
            if BaseFile.is_file_exists(file):
                BaseFile.remove_file(file)
    for data in all_data_set:
        all_data_list.append(data)
    if len(all_data_list) == 0:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
        return_info.data = ""
        return return_info
    datas = "\n".join(all_data_list)
    filepath = '{}\\{}_{}.txt'.format(temp, task_name + "_subject_list", time.time())
    with open(filepath, mode="w", encoding="utf-8") as f:
        f.write(datas)
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "成功导出"
    return_info.data = filepath
    return return_info


@router.get("/cnki/subject_map")
async def export_qk_list():
    return_info = ReturnInfo()
    task_name = "cnkijournal"
    task_tag = "cnkijournallist"

    all_data_list = list()
    rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)

    for info in rows:
        subjects = info["subject"]
        subjects = subjects.lstrip(";")
        local_data_set = set()

        if subjects != "":
            one_data_set = set()
            local_data_list = list()
            subject_list = subjects.split(";")
            for subject in subject_list:
                one_data_set.add(subject)

            for data in one_data_set:
                data_list = data.split("->")
                # 用于第二次确定一级学科的唯一值
                local_data_set.add(data_list[0])
            for one_data in local_data_set:
                local_data_list.append(one_data)
            my_data = ";".join(local_data_list)
            TXT_data = info["journal_rawid"] + "@" + task_name + "★" + my_data
            all_data_list.append(TXT_data)

    temp = "..\\fastapi_cjserver_temp"
    if not BaseDir.is_dir_exists(temp):
        BaseDir.create_dir(temp)
    for file in BaseDir.get_dir_all_files(temp):
        if time.time() - BaseFile.get_update_time(file) > 60 * 10:  # 60 * 10
            if BaseFile.is_file_exists(file):
                BaseFile.remove_file(file)
    if len(all_data_list) == 0:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
        return_info.data = ""
        return return_info
    datas = "\n".join(all_data_list)
    filepath = '{}\\{}_{}.txt'.format(temp, task_name + "_subject_map", time.time())
    with open(filepath, mode="w", encoding="utf-8") as f:
        f.write(datas)
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "成功导出"
    return_info.data = filepath
    return return_info


@router.post("/mdb/export")
async def export_qk_list(input: InputPlatformModel[SaveMdb]):
    # 用于通用型处理
    from comtypes.client import CreateObject
    access = CreateObject('Access.Application')
    from comtypes.gen import Access
    return_info = ReturnInfo()
    qk_liat_name = input.data.task_tag
    data_list = input.data.data_list
    int_list = input.data.int_list
    long_text_list = input.data.long_text_list
    # 获取字段名
    field_name_dict = data_list[0]
    field_name_insert_count = len(field_name_dict)

    field_name_list = list()

    for k, v in field_name_dict.items():
        if k in int_list:
            text = f"{k} integer"
        elif k in long_text_list:
            text = f"{k} LONGTEXT"
        else:
            text = f"{k} TEXT"

        field_name_list.append(text)
    field_names = ",".join(field_name_list)
    sql_create = f"""CREATE TABLE journal_info_list ({field_names});"""
    temp = r"E:\mdb_export"
    if not BaseDir.is_dir_exists(temp):
        BaseDir.create_dir(temp)
    for file in BaseDir.get_dir_all_files(temp):
        if time.time() - BaseFile.get_update_time(file) > 60 * 10:  # 60 * 10
            if BaseFile.is_file_exists(file):
                BaseFile.remove_file(file)
    filepath = '{}\\{}_{}.mdb'.format(temp, qk_liat_name, time.time())
    DBEngine = access.DBEngine
    db = DBEngine.CreateDatabase(filepath, Access.DB_LANG_GENERAL)
    db.BeginTrans()
    db.Execute(sql_create)
    for data in data_list:
        value_list = list()
        field_name_insert_list = list()
        field_name_value_list = list()
        for key, value in data.items():
            field_name_insert_list.append(key)
            value_list.append(value)
        field_names_insert = ",".join(field_name_insert_list)
        # field_name_value = ",".join(field_name_value_list)

        [field_name_value_list.append("?") for i in range(field_name_insert_count)]
        field_name_value = ",".join(field_name_value_list)
        # sql_insert = f"insert into journal_info_list({field_names_insert}) values({field_name_value})"
        values = tuple(value_list)
        try:
            db.Execute(f"insert into journal_info_list({field_names_insert}) values{values}")
        except:
            with open("error_wanfang.txt", mode="a", encoding="utf-8") as f:
                f.write(f"insert into journal_info_list({field_names_insert}) values{values}" + "\n")
        # cur_file.execute(sql_insert, values)
    db.CommitTrans()
    db.Close()
    # zip_name = '{}\\{}_{}.zip'.format(temp, qk_liat_name, time.time())
    # zp = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
    # zp.write(filepath)
    # zp.close()
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "成功导出MDB"
    return_info.data = filepath
    return return_info


@router.get("/export/chaoxing_qk_info_mdb")
async def export_cnki():
    # 用于定制化处理各种需求
    return_info = ReturnInfo()
    task_name = "chaoxingjournal"
    task_tag = "chaoxingjournallist"
    int_list = ["cnki_exclusive", "individual_issue", "first_launch"]
    long_text_list = ["journal_intro", "collect_database"]
    basetime = BaseTime()
    batchtime = basetime.get_today_date_strings()
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    code_publisher_dict = dict()
    for row in code_publisher_list:
        code_publisher_dict[row["publisher"]] = row["country"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            journal_rawid = info["journal_rawid"]
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            data["journal_name"] = BaseDicts().is_dict_exit_key(row, "journal_name")
            data["publisher"] = BaseDicts().is_dict_exit_key(row, "publisher")
            publisher_list = data["publisher"].split(";")
            # for one_publisher in publisher_list:
            country = "CN"
            for k, v in code_publisher_dict.items():
                if k in publisher_list:
                    country = v
                    break
                elif k in data["publisher"]:
                    country = v
                    break
            data["issn"] = BaseDicts().is_dict_exit_key(row, "issn")
            data["cnno"] = BaseDicts().is_dict_exit_key(row, "cnno")
            data["research_field"] = BaseDicts().is_dict_exit_key(row, "provider_subject")
            data["language"] = BaseDicts().is_dict_exit_key(row, "language").replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            data["type_name"] = BaseDicts().is_dict_exit_key(row, "type_name")
            cx_impact = BaseDicts().is_dict_exit_key(row, "impact")
            if cx_impact == "":
                data["cx_impact"] = ""
            else:
                data["cx_impact"] = cx_impact + "@" + batchtime
            data["journal_intro"] = BaseDicts().is_dict_exit_key(row, "journal_intro")
            data["collect_newyear"] = BaseDicts().is_dict_exit_key(row, "collect_newyear")
            data["provider_url"] = "http://qikan.chaoxing.com/mag/infos?mags=" + journal_rawid
            cited_cnt = BaseDicts().is_dict_exit_key(row, "cited_cnt")
            data["collect_database"] = BaseDicts().is_dict_exit_key(row, "CollectDatabase")
            if cited_cnt == "0":
                data["cited_cnt"] = ""
            else:
                data["cited_cnt"] = cited_cnt + "@" + batchtime
            data_list.append(data)
        if len(data_list) == 0:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            return_info.data = traceback.format_exc()
            return return_info.todict()

        url = get_settings().MDB_EXPORT_URL
        # url = "http://192.168.31.188:8001/crawler_platform/down_journallist_routers/mdb/export"
        data = InputPlatformModel[SaveMdb](
            data=SaveMdb(
                task_tag=task_tag,
                data_list=data_list,
                int_list=int_list,
                long_text_list=long_text_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 50,

            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出chaoxing期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
    except:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出chaoxing期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


@router.get("/export/cnki_qk_info_mdb")
async def export_cnki():
    return_info = ReturnInfo()
    import pypyodbc
    task_name = "cnkijournal"
    task_tag = "cnkijournallist"
    int_list = ["cnki_exclusive", "individual_issue", "first_launch"]
    long_text_list = ["evaluate_info", "journal_name_used", "journal_name_used"]
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    code_publisher_dict = dict()
    sql = "select DatabaseName, EnglishAbbr from Edit_DatabaseCollectKind"

    type_dict = dict()
    mdb_dir = r"F:\采集工作记录\collect_abbr.accdb"
    connStr = r"Driver={Microsoft Access Driver (*.mdb,*.accdb)};DBQ=%s" % (mdb_dir)
    conn = pypyodbc.win_connect_mdb(connStr)
    curser = conn.cursor()
    rows_mdb = curser.execute(sql)
    key_list = list()
    value_list = list()
    for row_mdb in rows_mdb:
        type_dict[row_mdb[0]] = row_mdb[1]
        key_list.append(row_mdb[0])
        value_list.append(row_mdb[1])

    cnki_dict = {
        "SCI": "SCIE",
        "CA": "CAS",
        "社科基金资助期刊": "SKJJZZ",
        "北大核心": "BDHX",
        "CSCD扩展版": "CSCD_E",
        "Pж(AJ)": "AJ",
        "CSSCI扩展版": "CSSCI_E",
        "卓越期刊": "EAPJ",
        "CSSCI来源集刊": "CSSCI_C",
        "AMI权威": "AMI_A",
        "AMI外文刊权威": "AMI_A",
        "AMI核心": "AMI_C",
        "AMI集刊核心": "AMI_C",
        "AMI外文刊核心": "AMI_C",
        "AMI新刊核心": "AMI_C",
        "AMI职院刊核心": "AMI_C",
        "AMI扩展": "AMI_E",
        "AMI职院刊扩展": "AMI_E",
        "AMI集刊入库": "AMI_S",
        "AMI入库": "AMI_S",
        "AMI外文刊入库": "AMI_S",
        "AMI新刊入库": "AMI_S",
        "AMI职院刊入库": "AMI_S",
        "AMI顶级": "AMI_T",
    }
    cnki_dict_key_list = list()
    for cnki_dict_key, cnki_dict_value in cnki_dict.items():
        cnki_dict_key_list.append(cnki_dict_key)

    for row in code_publisher_list:
        code_publisher_dict[row["publisher"]] = row["country"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            # special_name_list = list()
            # subject_name_list = list()
            journal_rawid = info["journal_rawid"]
            if not journal_rawid:
                continue
            # subjects = info["subject"].lstrip(";")
            # one_data_set = set()
            # if subjects != "":
            #     subject_list = subjects.split(";")
            #     for subject in subject_list:
            #         one_data_set.add(subject)
            #
            #     for data1 in one_data_set:
            #         data_list1 = data1.split("->")
            #         special_name_list.append(data_list1[0])
            #         subject_name_list.append(data_list1[1])
            #     special_name = ";".join(special_name_list)
            #     subject_name = ";".join(subject_name_list)
            # else:
            #     special_name = ""
            #     subject_name = ""
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            data["special_name"] = BaseDicts().is_dict_exit_key(row, "special_name")
            data["subject_name"] = BaseDicts().is_dict_exit_key(row, "subject_name")
            data["journal_name"] = BaseDicts().is_dict_exit_key(row, "journal_name")
            data["journal_name_alt"] = BaseDicts().is_dict_exit_key(row, "journal_name_alt")
            cnki_impact_fh_1 = BaseDicts().is_dict_exit_key(row, "cnki_impact_fh_1")
            if cnki_impact_fh_1:
                data["cnki_impact_fh"] = cnki_impact_fh_1
            else:
                data["cnki_impact_fh"] = BaseDicts().is_dict_exit_key(row, "cnki_impact_fh")
            cnki_impact_zh_1 = BaseDicts().is_dict_exit_key(row, "cnki_impact_zh_1")
            if cnki_impact_zh_1:
                data["cnki_impact_zh"] = cnki_impact_zh_1
            else:
                data["cnki_impact_zh"] = BaseDicts().is_dict_exit_key(row, "cnki_impact_zh")
            data["journal_name_used"] = BaseDicts().is_dict_exit_key(row, "journal_name_used")
            data["publisher"] = BaseDicts().is_dict_exit_key(row, "publisher")
            publisher_list = data["publisher"].split(";")
            # for one_publisher in publisher_list:
            country = "CN"
            journalType = BaseDicts().is_dict_exit_key(row, "journalType")
            journal_type_list = journalType.split(";")
            j_list = list()
            for one_type in journal_type_list:
                if one_type in value_list:
                    j_list.append(one_type)
                elif one_type in key_list:
                    new_value = type_dict[one_type]
                    j_list.append(new_value)
                elif one_type in cnki_dict_key_list:
                    new_value = cnki_dict[one_type]
                    j_list.append(new_value)
                else:
                    j_list.append(one_type)
                    # with open("collect_abbr.txt", mode="a", encoding="utf-8") as f:
                    #     f.write(one_type + "\n")
            data["collect_abbr"] = ";".join(j_list)
            for k, v in code_publisher_dict.items():
                if k in publisher_list:
                    country = v
                    break
                elif k in data["publisher"]:
                    country = v
                    break
            data["type_name"] = BaseDicts().is_dict_exit_key(row, "type_name")
            data["issn"] = BaseDicts().is_dict_exit_key(row, "issn")
            data["cnno"] = BaseDicts().is_dict_exit_key(row, "cnno")
            data["pub_place"] = BaseDicts().is_dict_exit_key(row, "pub_place")
            data["language"] = BaseDicts().is_dict_exit_key(row, "language").replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            data["book_size"] = BaseDicts().is_dict_exit_key(row, "book_size")
            data["sem_code"] = BaseDicts().is_dict_exit_key(row, "sem_code")
            data["create_date"] = BaseDicts().is_dict_exit_key(row, "create_date")
            data["state"] = BaseDicts().is_dict_exit_key(row, "state")
            data["source_db"] = BaseDicts().is_dict_exit_key(row, "source_db")
            data["article_count"] = BaseDicts().is_dict_exit_key(row, "article_count").replace("篇", "")
            data["down_cnt"] = BaseDicts().is_dict_exit_key(row, "down_cnt").replace("次", "")
            data["cited_cnt"] = BaseDicts().is_dict_exit_key(row, "cited_cnt").replace("次", "")
            data["evaluate_info"] = BaseDicts().is_dict_exit_key(row, "evaluate_info")
            data["cnki_exclusive"] = BaseDicts().is_dict_exit_key(row, "cnki_exclusive", default=0)
            data["individual_issue"] = BaseDicts().is_dict_exit_key(row, "individual_issue", default=0)
            data["first_launch"] = BaseDicts().is_dict_exit_key(row, "first_launch", default=0)
            data["collect_newyear"] = BaseDicts().is_dict_exit_key(row, "collect_newyear").replace("None", "")
            provider_url = f"CNKI@https://navi.cnki.net/knavi/journals/{journal_rawid}/detail?uniplatform=NZKPT"
            data["provider_url"] = provider_url

            data_list.append(data)
        if len(data_list) == 0:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            return_info.data = traceback.format_exc()
            return return_info.todict()
        url = get_settings().MDB_EXPORT_URL
        data = InputPlatformModel[SaveMdb](
            data=SaveMdb(
                task_tag=task_tag,
                data_list=data_list,
                int_list=int_list,
                long_text_list=long_text_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 50,
            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出cnki期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
    except:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出cnki期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


@router.get("/export/wanfang_qk_info_mdb")
async def export_wanfang():
    """Export Wanfang journal-list metadata as an .mdb file via the remote export service.

    Reads rows for task ``wanfangjournal`` / ``wanfangjournallist``, normalises each
    journal record (names, ISSN/CN numbers, publisher country lookup, impact factor,
    collection years, provider URL), then POSTs the batch to ``MDB_EXPORT_URL``.

    Returns:
        dict: ``ReturnInfo`` payload — on success ``data`` holds the exported file
        path returned by the remote service; on failure ``data`` carries diagnostics.
    """
    return_info = ReturnInfo()
    task_name = "wanfangjournal"
    task_tag = "wanfangjournallist"
    code_publisher_list = await CoreSqlMixin.get_code_publisher()
    # publisher name -> country code lookup, used to derive each journal's country
    code_publisher_dict = {row["publisher"]: row["country"] for row in code_publisher_list}
    long_text_list = ["journal_intro", "award_state", "collect_database"]
    try:
        rows = await CoreSqlMixin.get_export_journallist(task_name, task_tag)
        data_list = []
        for info in rows:
            journal_rawid = info["journal_rawid"]
            row = json.loads(info["journal_json"])
            data = {}
            data["journal_rawid"] = journal_rawid
            # last_year may live either in journal_json or in other_dicts; prefer
            # the other_dicts value when the two disagree
            last_year1 = row['last_year']
            other_dicts_2 = json.loads(info["other_dicts"])
            last_year2 = BaseDicts.is_dict_exit_key(other_dicts_2, "last_year")
            collect_database = row.get("collect_database")
            if not collect_database:
                collect_database = other_dicts_2.get("collect_database", "")
            last_year = last_year2 if last_year1 != last_year2 else last_year1
            if len(last_year) == 0:
                # The outer list offered this journal but it was withdrawn internally;
                # agreed (with 孔老师) to skip such records.
                continue
            data["journal_name"] = row['qk_name']
            # trans_title packs the alternate and Korean titles separated by two spaces
            trans_title = row['trans_title'].strip()
            lst = trans_title.split('  ')
            data["journal_name_alt"] = lst[0]
            journal_name_korea = ''
            if len(lst) == 2:
                journal_name_korea = lst[1]
            data["journal_name_korea"] = journal_name_korea
            data["issn"] = row['issn'].replace("无", "").replace("ISSN", "").strip()
            data["cnno"] = row['cn'].replace("无", "").replace("CN", "").strip()
            data["publisher"] = row['hostunit_name']
            # Match first on an exact publisher in the ';'-separated list, then on a
            # substring of the raw publisher field; default to CN.
            publisher_list = data["publisher"].split(";")
            country = "CN"
            for k, v in code_publisher_dict.items():
                if k in publisher_list or k in data["publisher"]:
                    country = v
                    break
            data["director_dept"] = row['dep_name']
            data["type_name"] = row['type_name']  # row['publish_cycle']
            data["award_state"] = row['award_state']
            data["collect_database"] = collect_database
            language = row['language']
            if language == 'c;h;i':
                language = '中文'
            elif language == 'e;n;g':
                language = '英文'
            data["language"] = language.replace("中文", "ZH").replace("英文", "EN")
            data["country"] = country
            # Impact factor formatted as "<value>@<update-date YYYYMMDD>" when positive
            wf_impact = ''
            if row['wf_impact'] != '' and float(row['wf_impact']) > 0:
                wf_impact = '%.2f' % (float(row['wf_impact']))
                wf_impact = '%s@%s' % (str(wf_impact), info['update_time'].strftime('%Y%m%d'))
            data["wf_impact"] = wf_impact
            data["chief_editor"] = row['chief_editor']
            data["journal_name_used"] = row['former_name']
            data["journal_intro"] = row['perio_desc']
            data["post_code"] = row['post_code']
            data["is_stop"] = row['is_stop']
            data["email"] = row['email']
            data["edit_office_addr"] = row['edit_office_addr']
            data["tel_code"] = row['tel_code']
            data["web_site"] = row['web_site']
            data["fax"] = row['fax']
            data["subject"] = info['subject'].lstrip(";")
            data["collect_newyear"] = last_year
            provider_url = f"WF@https://sns.wanfangdata.com.cn/perio/{journal_rawid}"
            data["provider_url"] = provider_url
            data_list.append(data)
        if not data_list:
            return_info.status = FAILED
            return_info.msg_code = 400
            return_info.msg = "该任务目前无数据(含数据重置),请检查,或改日再提取"
            # No exception is active here, so format_exc() would only yield
            # "NoneType: None" — report an empty detail instead.
            return_info.data = ""
            return return_info.todict()
        url = get_settings().MDB_EXPORT_URL
        data = InputPlatformModel[SaveMdb](
            data=SaveMdb(
                task_tag=task_tag,
                data_list=data_list,
                long_text_list=long_text_list
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 50,
            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            return_info.status = SUCCESS
            return_info.msg_code = MsgCode.SUCCESS_CODE
            return_info.msg = "导出万方期刊信息成功"
            return_info.data = myfile
            return return_info.todict()
        # Previously fell through and implicitly returned None (HTTP body "null");
        # surface the remote failure explicitly instead.
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出万方期刊信息失败"
        return_info.data = dicts
        return return_info.todict()
    except Exception:
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "导出万方期刊信息失败"
        return_info.data = traceback.format_exc()
        return return_info.todict()


@router.get("/getfile/get_one_mdb")
async def get_one_file(thismodel: FileGetFromWeb):
    """Proxy-fetch one exported .mdb file from the matching backend export endpoint.

    Maps the ``thismodel`` enum to the configured export URL (chaoxing / cnki /
    wanfang), asks that service for the exported file path, and streams the file
    back as a download.

    Returns:
        FileResponse on success, otherwise a ``ReturnInfo`` failure dict.
    """
    return_info = ReturnInfo()
    url = ""
    if thismodel == FileGetFromWeb.chaoxing_list:
        url = get_settings().CHAOXING_MDB
    elif thismodel == FileGetFromWeb.cnki_list:
        url = get_settings().CNKI_MDB
    elif thismodel == FileGetFromWeb.wanfang_list:
        url = get_settings().WANFANG_MDB

    if url == "":
        return_info.status = FAILED
        return_info.msg_code = 400
        return_info.msg = "输入参数错误, 请检查"
        # No exception is active on this path, so format_exc() would only yield
        # "NoneType: None" — report an empty detail instead.
        return_info.data = ""
        return return_info.todict()

    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": INSIDE_HEADERS,
        "url": url,
        "timeout": 100,

        "data": "",
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                         rrq.msg_status_code_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    if bools:
        my_dicts = json.loads(rrq.html)
        myfile = my_dicts["data"]
        # The remote service returns a Windows-style path; take the basename
        # as the download filename.
        name = myfile.split("\\")[-1]
        return FileResponse(myfile, filename=name)
    else:
        return_info.status = FAILED
        return_info.msg_code = 400
        try:
            # rrq.html may be None or non-JSON when the request failed outright,
            # so the parse must stay inside the try (it previously sat outside
            # and could raise an unhandled exception).
            return_info.msg = json.loads(rrq.html)["msg"]
        except Exception:
            pass
        return_info.data = dicts
        return return_info.todict()


@router.post("/chaoxingjournal/issue_parse")
async def issue_parse(input: InputPlatformModel[ChaoXingIssueHtml]):
    '''
    For each id in ``input.data.id_list``, ask the html-extraction service for the
    stored issue html, parse it, and collect the contained article ids
    (``basic_dxid``) keyed by the input row id.

    :param input: wrapper whose ``data.id_list`` rows are
        (id, task_name, task_tag, journal_rawid, pub_year, num) tuples
    :return: ReturnInfo dict mapping each row id to its list of basic_dxid values
    '''
    return_info = ReturnInfo()
    id_list = input.data.id_list
    id_dicts = dict()

    url = await URLDISTRIBUTED.get_chaoxing_issue_html_url()
    for row in id_list:
        # One-element batch per request, built fresh each iteration (replaces the
        # old append-then-clear shared-list pattern).
        data_dict = {
            "task_name": row[1],
            "task_tag": row[2],
            "journal_rawid": row[3],
            "pub_year": row[4],
            "num": row[5],
        }
        id_dicts[row[0]] = list()
        data = InputPlatformModel[GetCommanderHtml](
            data=GetCommanderHtml(
                task_name="chaoxingjournal",
                task_tag="chaoxingissue",
                id_list=[data_dict],
                infos={"last_data.down_dict.1_1.html": 1}
            )
        ).json()
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 100,

            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares, rrq.is_none_html_middlerwares, rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
        if bools:
            my_dicts = json.loads(rrq.html)
            myfile = my_dicts["data"]
            for one_file in myfile:
                # Only the stored html payload matters; the mongo key is unused.
                for stored in one_file.values():
                    html_format = json.loads(stored["1_1"]["html"])
                    datainfo = html_format["data"][0]["datainfo"]["data"]
                    for item in datainfo:
                        id_dicts[row[0]].append(item["basic_dxid"])

    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    # Fixed copy-paste message: this endpoint parses ChaoXing issue html,
    # it does not export Wanfang journal info.
    return_info.msg = "超星issue解析成功"
    return_info.data = id_dicts
    return return_info.todict()


@router.post("/cnkijournallist/get_cookie")
async def get_cookie(request: Request):
    """Fetch a fresh CNKI session cookie through a proxy and cache it in redis.

    Requests the CNKI journal search page with browser-like headers, extracts the
    response cookies, and stores them under the redis hash
    ``cookie / cnkijournal_cnkijournallist_cookie``.

    :param request: FastAPI request, used only to reach ``app.state.redis``
    :return: ReturnInfo dict whose ``data`` is the cookie name->value mapping
    """
    return_info = ReturnInfo()
    url = "https://kns.cnki.net/kns8?dbcode=CFLQ"
    header_dict = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Host": "kns.cnki.net",
        "Referer": "https://www.cnki.net/",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36"
    }
    bools, proxies = await get_proxy("cnkijournal", "cnkijournallist_cookie")
    if not bools:
        return_info.status = FAILED
        return_info.msg_code = MsgCode.PROXY_ERROR
        return_info.msg = "代理获取失败"
        return_info.data = ""
        return return_info.todict()

    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": header_dict,
        "url": url,
        "timeout": 30,
        "proxy": proxies,
        "moths": MRequest.GET,
        # "p-intro" must appear in the page body for the response to count as valid
        "marks": ["p-intro"],
        "allow_redirects": True
    }
    bools, dicts = await RequestApiHelper.normal_request(**kwargs)
    if not bools:
        return_info.status = FAILED
        return_info.msg_code = MsgCode.API_FAIL_CODE
        return_info.msg = "请求获取cookie失败"
        return_info.data = dicts
        return return_info.todict()
    # cookies.items() yields (name, Morsel); only the Morsel carries the data.
    cookies = {morsel.key: morsel.coded_value for _, morsel in rrq.resp.cookies.items()}
    redis = request.app.state.redis
    # Fire-and-forget cache write; the hset return value is not meaningful here.
    await redis.hset("cookie", "cnkijournal_cnkijournallist_cookie", json.dumps(cookies, ensure_ascii=False))
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "请求获取cookie成功"
    return_info.data = cookies
    return return_info.todict()

@router.get("/conn/conn_server")
async def conn_server():
    """Liveness probe: always reports success so callers can confirm this port is up.

    Renamed from ``export_wanfang`` — the old name collided with the real Wanfang
    export handler defined earlier in this module and shadowed it in the module
    namespace (the route itself is addressed by path, so no caller breaks).
    """
    return_info = ReturnInfo()
    return_info.status = SUCCESS
    return_info.msg_code = MsgCode.SUCCESS_CODE
    return_info.msg = "该端口存活"
    return_info.data = ""
    return return_info.todict()
