import copy
import datetime
import json
import re
import time

from parsel import Selector
from re_common.baselibrary.database.mysql import json_update
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.basetime import BaseTime
from re_common.baselibrary.utils.core.requests_core import INSIDE_HEADERS
from re_common.vip.baseencodeid import BaseLngid

from apps.crawler_platform.core_platform.core_g import SQLTable, CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, InputPlatformModel, journalInputMode, \
    RedisAllTaskModel, CallBackModel, JournalListModel, JournalIssueModel, DealInsertModel, JournalHomeModel, \
    DealUpdateModel, OperatorSqlModel, EtlDealModel
from apps.crawler_platform.util.requestapihelper import RequestApiHelper
from apps.crawler_platform.util.sqlhelper import SQLHelper
from settings import get_settings

# Names exported when this module is consumed via ``from ... import *``:
# the shared key normalizer plus one callback per chaoxing task stage.
__all__ = ["para_key_callback",
           "chaoxingjournal_chaoxingjournallist_callback",
           "chaoxingjournal_chaoxingjournallist_redis_callback",
           "chaoxingjournal_chaoxingissue_callback",
           "chaoxingjournal_chaoxingarticle_callback",
           "chaoxingjournal_chaoxinghome_callback",
           "chaoxingjournal_chaoxinghomeclass_callback",
           "chaoxingjournal_chaoxingqkhomeclass_callback",
           "chaoxingjournal_chaoxingarticle_etl_callback"
           ]


def para_key_callback(dicts):
    """Recursively normalize every ``key_name`` value inside a nested structure.

    Walks dicts/lists/tuples in place; each string value stored under the key
    ``"key_name"`` gets full/half-width colons removed, surrounding whitespace
    stripped and a trailing ``(N)`` counter (e.g. ``"分类(12)"``) deleted.

    Args:
        dicts: Arbitrarily nested combination of dict/list/tuple and scalars.
            Mutated in place; nothing is returned.
    """
    if isinstance(dicts, dict):
        for key, value in dicts.items():
            if isinstance(value, (list, tuple, dict)):
                para_key_callback(value)
            elif key == "key_name" and isinstance(value, str):
                # str guard added: the previous version crashed on non-str values.
                cleaned = value.replace("：", "").replace(":", "").strip()
                dicts[key] = re.sub(r"\(\d+\)", "", cleaned)
    elif isinstance(dicts, (list, tuple)):
        for item in dicts:
            para_key_callback(item)


def chaoxingjournal_chaoxingjournallist_callback(callmodel: CallBackModel[JournalListModel]) -> DealModel:
    """Parse a journal detail page ("1_1" payload) for the journal-list task.

    Populates the returned DealModel with:
      * ``befor_dicts.update`` - the current row's merged ``journal_json``
        plus ``is_active=1``;
      * ``next_dicts.insert`` - one next-stage row per (year, issue number)
        listed under ``qk_year``, each carrying an ``issue_json`` payload;
      * ``other_dicts`` - fields whose labels are not in the mapping below;
      * ``null_dicts`` - labels never filled in (see NOTE near the end).
    """
    result = DealModel()
    journal_json_dicts = {}
    # Chinese page labels -> canonical field names.
    dicts = {
        "主办": "publisher",
        "ISSN": "issn",
        "CN": "cnno",
        "分类": "provider_subject",
        "语言": "language",
        "周期": "type_name",
        "简介": "journal_intro",
        "总被引频次": "total_cites",
    }
    func_name = "chaoxingjournal_chaoxingjournallist_callback"
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    journal_rawid = callmodel.sql_model.journal_rawid
    # Base fields copied into every queued next-stage row.
    info_dicts = {"task_name": task_info.task_name,
                  "task_tag": task_info.task_tag,
                  "journal_rawid": journal_rawid,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    para_dicts = para_dicts["data"]
    # Normalize every "key_name" in the payload (strip colons / "(N)" counters).
    para_key_callback(para_dicts)

    issue_json = {}
    journal_list_json = json.loads(callmodel.sql_model.journal_json)
    issue_json["journal_name"] = journal_list_json["journal_name"]
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    # dict_1_1 = BaseDicts.is_dict_exit_key(para_dicts, "1_1", {})
    # Newest collected year = first entry under qk_year, if present.
    # NOTE(review): bare except also hides unexpected errors; KeyError /
    # IndexError / TypeError are the ones actually expected here.
    try:
        collect_newyear_dict = para_dicts["1_1"]["qk_year"]["children"][0]
        collect_newyear = collect_newyear_dict["year"]
    except:
        collect_newyear = ""
    journal_json_dicts["collect_newyear"] = collect_newyear.replace("年", "")
    # CAUTION: the nested loops below deliberately rebind k/v at each level.
    for k, v in para_dicts.items():
        if k == "1_1":
            for k, v in para_dicts[k].items():
                if k in ["magid", "cover_url"]:
                    journal_json_dicts[k] = v
                    if k == "magid":
                        issue_json["magid"] = v
                elif k == "qk_info":
                    # Label/value pairs from the journal-info table.
                    for item in v["children"]:
                        k = item["key_name"].strip(":").strip("：")
                        v = item["value"]
                        if k.__contains__("简介"):
                            k = dicts["简介"]
                            journal_json_dicts[k] = v
                        if k in dicts.keys():
                            k = dicts[k]
                            if k == "total_cites":
                                result.other_dicts.update({k: v})
                            else:
                                journal_json_dicts[k] = v
                        else:
                            result.other_dicts.update({k: v})
                elif k == "qk_year":
                    # Queue one next-stage row per issue number of each year.
                    for item in v["children"]:
                        year = item["year"].replace("年", "")
                        issue_list = item["num"].split(",")
                        for i in issue_list:
                            temp = info_dicts.copy()
                            temp["task_tag"] = temp["task_tag_next"].split(";")[-1]
                            del temp["task_tag_next"]
                            # Loop-invariant re-scan of qk_info for every issue
                            # number; kept as-is to preserve behavior exactly.
                            for item_value in para_dicts["1_1"]["qk_info"]["children"]:
                                key_name = item_value["key_name"].strip(":").strip("：")
                                value = item_value["value"]
                                if key_name == "ISSN":
                                    issue_json["issn"] = value
                                elif key_name == "CN":
                                    issue_json["cnno"] = value
                                elif key_name == "分类":
                                    issue_json["provider_subject"] = value
                                elif key_name == "主办":
                                    issue_json["publisher"] = value
                            temp.update(
                                {'pub_year': year, 'num': i, 'issue_json': json.dumps(issue_json, ensure_ascii=False)})
                            # result.next_dicts["insert_ig"]["lists"].append(temp)
                            d_i_model.lists.append(temp)
                elif k == "qk_collect":
                    # Collection-database line plus an optional second string.
                    CollectDatabase = ""
                    rule = re.compile("该刊被数据库收录[:：];(.*)")
                    qk_collect1 = v["qk_collect1"]
                    collect_list = re.findall(rule, qk_collect1)
                    if len(collect_list) != 0:
                        CollectDatabase = collect_list[0]
                    qk_collect2 = v["qk_collect2"]
                    if len(qk_collect2) != 0:
                        CollectDatabase = CollectDatabase + ";" + qk_collect2
                    journal_json_dicts["CollectDatabase"] = CollectDatabase
                else:
                    if k in dicts.values():
                        if k == "total_cites":
                            result.other_dicts.update({k: v})
                        else:
                            journal_json_dicts[k] = v
                    else:
                        result.other_dicts.update({k: v})

            # NOTE(review): `dicts` keys are the Chinese labels while
            # journal_json_dicts stores the English names from dicts.values(),
            # so this membership test looks always-true — confirm whether
            # `dicts[k] not in journal_json_dicts` was intended.
            for k in dicts.keys():
                if k not in journal_json_dicts.keys():
                    result.null_dicts.update({k: ""})
    result.next_dicts.insert.append(d_i_model)
    qk_dicts = json.loads(callmodel.sql_model.journal_json)
    qk_dicts.update(journal_json_dicts)
    result.befor_dicts.update.update({"journal_json": json.dumps(qk_dicts, ensure_ascii=False), 'is_active': 1})

    # parse_obj re-validates the accumulated result before returning it.
    return DealModel().parse_obj(result)


async def sql_select():
    """Fetch one random journal_rawid row queued for the chaoxingjournallist task."""
    query = (
        f'select journal_rawid from {SQLTable.journal_list} '
        'where task_name = "chaoxingjournal" and task_tag = "chaoxingjournallist" '
        'order by rand() limit 1'
    )
    return await SQLHelper.fetchone(query)


async def chaoxingjournal_chaoxingjournallist_redis_callback(tmpdict: RedisAllTaskModel) -> RedisAllTaskModel:
    """Refresh the shared chaoxing Cookie header when it is older than one hour.

    The model is received, possibly mutated, and passed back out; updating
    MySQL and redis is already done inside the cookie-request API, so no
    extra persistence happens here.  (original author: zcn)
    """
    redis_time_forward = tmpdict.parse_dict["1_1"].task_set.redis_uptime
    redis_time = BaseTime().str_to_timestamp(redis_time_forward)
    headers = json.loads(tmpdict.parse_dict["1_1"].task_info.headers)
    now_time = BaseTime().get_time()
    if now_time - redis_time > 3600:  # cookie considered stale after one hour
        # A random queued journal id is used as the probe request payload.
        journal_rawid = await sql_select()
        data = InputPlatformModel(
            data=journalInputMode(journal_rawid=journal_rawid[0]
                                  )).json()
        url = get_settings().CHAOXING_COOKIE_GET
        rrq = RequestApiHelper.get_rrq()
        kwargs = {
            "rrq": rrq,
            "header": INSIDE_HEADERS,
            "url": url,
            "timeout": 30,
            "data": data,
            "moths": MRequest.POST,
            "middler_list": [rrq.status_code_middlerwares,
                             rrq.is_none_html_middlerwares,
                             rrq.is_null_html_middlerwares,
                             rrq.msg_status_code_middlerwares]
        }
        bools, dicts = await RequestApiHelper.chaoxing_cookie(**kwargs)
        if bools:
            # The response body lands on rrq.html after the middleware chain ran.
            v_dict = json.loads(rrq.html)
            if v_dict["status"] == "SUCCESS":
                headers["Cookie"] = v_dict["data"]["Cookie"]
                # NOTE(review): headers was loaded from a JSON *string* above
                # but is written back as a dict — confirm downstream consumers
                # accept both representations.
                tmpdict.parse_dict["1_1"].task_info.headers = headers
                tmpdict.parse_dict["1_1"].task_set.redis_uptime = str(datetime.datetime.now())
            return tmpdict
        else:
            return tmpdict
    else:
        return tmpdict


def chaoxingjournal_chaoxingissue_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """Parse one journal-issue page.

    Records the issue's article count on the current row
    (``befor_dicts.update``) and queues one next-stage row per article
    (``next_dicts.insert``), carrying all parsed fields in
    ``article_info_json``.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    sql_model = callmodel.sql_model
    data_1_1 = para_dicts["data"].get("1_1")
    if data_1_1 is None:
        # Debug marker kept from the original implementation: nothing parsed.
        print("******************")
        return result
    datainfo = data_1_1["data"][0]["datainfo"]
    article_counts = BaseDicts.is_dict_exit_key(datainfo, "count", 0)
    result.befor_dicts.update.update({"articlecount": article_counts, "page": "1"})
    issue_json_solve = json.loads(sql_model.issue_json)
    d_i_model = DealInsertModel()
    d_i_model.insert_pre = CoreSqlValue.insert_ig_it
    # Reuse `datainfo` instead of re-navigating para_dicts["data"]["1_1"]... .
    for item in datainfo["data"]:
        issn = item["basic_identifier_1"]
        if len(issn) == 0:
            # Fall back to the ISSN stored with the issue row.
            issn = BaseDicts.is_dict_exit_key(issue_json_solve, "issn")
        # Everything except the row keys travels as one JSON payload.
        article = {
            "journal_rawid": sql_model.journal_rawid,
            "pub_year": sql_model.pub_year,
            "creator": item["basic_creator"],
            "date": item["basic_date"],
            "issue": item["basic_source_no"],
            "title": item["basic_title"],
            "rawid_alt": item["basic_title_url"].lstrip("/"),
            "source": BaseDicts.is_dict_exit_key(issue_json_solve, "journal_name"),
            "volume": sql_model.num.split("=")[-1],
            "provider_subject": BaseDicts.is_dict_exit_key(issue_json_solve, "provider_subject"),
            "magid": BaseDicts.is_dict_exit_key(issue_json_solve, "magid"),
            "cnno": BaseDicts.is_dict_exit_key(issue_json_solve, "cnno"),
            "issn": issn,
        }
        d_i_model.lists.append({
            "rawid": item["basic_dxid"],
            "task_tag": task_info.task_tag_next,
            "task_name": task_info.task_name,
            "sub_db_id": sql_model.sub_db_id,
            "article_info_json": json.dumps(article, ensure_ascii=False),
        })
    result.next_dicts.insert.append(d_i_model)

    return result


def chaoxingjournal_chaoxingarticle_callback(callmodel: CallBackModel[JournalIssueModel]) -> DealModel:
    """No-op parse callback for the chaoxingarticle stage: returns an empty
    DealModel without touching the crawled data."""
    result = DealModel()
    return result


def chaoxingjournal_chaoxinghome_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse the site home page: queue one row per top-level category.

    Every category under ``onelevel`` (except the "推荐"/recommended tab)
    becomes a next-stage row; ids 0110/0111 additionally get a row for the
    stage after that because they have no sub-categories.
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    data = para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in data:
        d_i_model = DealInsertModel()
        d_i_model.insert_pre = CoreSqlValue.insert_ig_it
        for item in data["1_1"]['onelevel']:
            if item["fenleiname"] == "推荐":
                # Skip the "recommended" pseudo-category.
                continue
            temp = info_dicts.copy()
            task_tag_next_list = temp["task_tag_next"].split(";")
            temp["task_tag"] = task_tag_next_list[0]
            del temp["task_tag_next"]
            field = item["fenleiname"]
            temp["home_rawid"] = item["fenleiid"]
            json_temp = {
                "field": field,
            }
            temp["page_index"] = 1
            temp["home_json"] = json.dumps(json_temp, ensure_ascii=False)
            d_i_model.lists.append(temp)
            # Military (0110) and sports (0111) have no sub-categories, so
            # they also get a row for the last stage in task_tag_next.
            if item["fenleiid"] in ["0110", "0111"]:
                temp_another = copy.deepcopy(temp)
                temp_another["task_tag"] = task_tag_next_list[-1]
                json_temp_another = {
                    "his": field,
                    "field": field,
                }
                temp_another["home_json"] = json.dumps(json_temp_another, ensure_ascii=False)
                d_i_model.lists.append(temp_another)

        # NOTE(review): rows go to befor_dicts.insert here while the list/issue
        # callbacks use next_dicts.insert — confirm this asymmetry is intended.
        result.befor_dicts.update.update({'is_active': 1, "page": -1})
        result.befor_dicts.insert.append(d_i_model)
    return result


def chaoxingjournal_chaoxinghomeclass_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Expand one top-level home category into rows for its sub-categories.

    Each entry under ``1_1.other`` becomes a next-stage row whose
    ``home_json`` records the breadcrumb ("his") and the sub-category name.
    The current row is marked active with ``page=-1``.
    """
    result = DealModel()
    data = callmodel.para_dicts["data"]
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # Base fields copied into every queued row.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in data:
        d_i_model = DealInsertModel()
        d_i_model.insert_pre = CoreSqlValue.insert_ig_it
        # Parent category name is loop-invariant: parse the stored JSON once
        # (previously re-parsed on every iteration).
        befor_field = json.loads(callmodel.sql_model.home_json)["field"]
        for item in data["1_1"]['other']:
            temp = info_dicts.copy()
            temp["task_tag"] = temp["task_tag_next"]
            del temp["task_tag_next"]
            field = item["fenleiname"]
            temp["home_rawid"] = item["fenleiid"]
            json_temp = {
                "his": befor_field + "->" + field,
                "field": field,
            }
            temp["page_index"] = 1
            temp["home_json"] = json.dumps(json_temp, ensure_ascii=False)
            d_i_model.lists.append(temp)
        result.befor_dicts.update.update({'is_active': 1, "page": -1})
        result.befor_dicts.insert.append(d_i_model)
    return result


def deal_sql_dict(sql_dict):
    """Strip bookkeeping columns from a row dict, in place.

    Removes id/timestamps/diagnostic/state columns so the remaining payload
    can be cloned into fresh task rows.

    Args:
        sql_dict: Mutable row dict, e.g. from ``sql_model.dict()``.

    Returns:
        The same dict instance with the bookkeeping keys removed. Missing
        keys are now ignored (the previous version raised KeyError).
    """
    for key in ("id", "update_time", "create_time", "null_dicts",
                "err_msg", "other_dicts", "state", "failcount"):
        sql_dict.pop(key, None)
    return sql_dict

def chaoxingjournal_chaoxingqkhomeclass_callback(callmodel: CallBackModel[JournalHomeModel]) -> DealModel:
    """Parse one page of a journal sub-category listing.

    Effects on the returned DealModel:
      * ``code_dicts`` records the category's total page count;
      * on early pages of a fresh crawl (turn_page 7/8 heuristics) stale
        per-page rows are toggled inactive/active and one row per remaining
        page is (re)inserted (``befor_dicts``);
      * each journal on the page yields a next-stage insert plus an update
        that merges the scraped stats into an existing ``journal_json``
        (``next_dicts``).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "sub_db_id": callmodel.sql_model.sub_db_id,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        data = para_dicts["data"]["1_1"]
        total_page = int(data['max_page_info'].split('/')[-1])
        if total_page > 200:
            # Oversized categories are skipped entirely.
            return result
        result.code_dicts = {
            "1_1": {"max_page": total_page}
        }
        page_index = int(callmodel.sql_model.page_index)
        turn_page = task_info.turn_page
        if (turn_page == 7 and page_index < 3) or (turn_page == 8 and page_index < 4):
            sql_dict = callmodel.sql_model.dict()
            # Shared WHERE conditions; the last entry's operator is flipped
            # between the deactivate (>) and activate (<=) updates below.
            tmp_list = [
                {
                    "key": "task_name",
                    "value": sql_dict["task_name"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "task_tag",
                    "value": sql_dict["task_tag"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "home_rawid",
                    "value": sql_dict["home_rawid"],
                    "operator": "=",
                    "description": "and"
                },
                {
                    "key": "page_index",
                    "value": total_page,
                    "operator": ">",
                    "description": "and"
                }
            ]
            # Deactivate page rows beyond the current total page count...
            du_model_bef_1 = DealUpdateModel()
            du_model_bef_1.update.update({"is_active": "0"})
            du_model_bef_1.where = [OperatorSqlModel().parse_obj(item) for item in tmp_list]
            result.befor_dicts.update_list.append(du_model_bef_1)
            # ...and (re)activate the ones still in range.
            tmp_list[-1]["operator"] = "<="
            du_model_bef_2 = DealUpdateModel()
            du_model_bef_2.update.update({"is_active": "1"})
            du_model_bef_2.where = [OperatorSqlModel().parse_obj(item) for item in tmp_list]
            result.befor_dicts.update_list.append(du_model_bef_2)

            di_model_bef = DealInsertModel()
            di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
            # Reuse the shared helper instead of the previous inline pop block.
            deal_sql_dict(sql_dict)
            for page in range(page_index, total_page + 1):
                sql_dict["page"] = total_page
                sql_dict["page_index"] = page
                di_model_bef.lists.append(sql_dict.copy())
            result.befor_dicts.insert.append(di_model_bef)
        di_model_next = DealInsertModel()
        di_model_next.insert_pre = CoreSqlValue.insert_ig_it
        home_json = json.loads(callmodel.sql_model.home_json)
        his = home_json.get("his", "")
        children = data['qk_info']['children']
        if children:
            # Hoisted out of the loop: the value is identical every iteration
            # (previously re-applied once per journal).
            result.befor_dicts.update.update({'page': total_page})
        for item in children:
            journal_rawid = item['url'].split('mags=')[-1]
            # Stats payload stored as journal_json on the next-stage row.
            new_dict = {
                "publisher": item['organ'],
                "impact": item['impact'],
                "journal_name": item['name'],
                "read_cnt": item['read'],
                "cited_cnt": item['cited'],
            }

            temp_info = info_dicts.copy()
            temp_info["task_tag"] = temp_info["task_tag_next"]
            del temp_info["task_tag_next"]
            temp_info["journal_rawid"] = journal_rawid
            temp_info["sub_db_id"] = "00006"
            temp_info["is_active"] = 1
            temp_info["subject"] = his
            temp_info["journal_json"] = json.dumps(new_dict, ensure_ascii=False)
            di_model_next.lists.append(temp_info)

            # If the journal row already exists, merge the new stats into its
            # journal_json and append this category to its subject list.
            du_model = DealUpdateModel()
            duplicte = json_update(new_dict)
            du_model.update.update({
                "sub_db_id": "00006",
                "is_active": "1"})
            du_model.update_no_placeholder.update({
                                    "journal_json": f"JSON_SET(journal_json, {duplicte})"})
            du_model.where.update({"journal_rawid": journal_rawid,
                                   "task_tag": temp_info["task_tag"],
                                   "task_name": callmodel.sql_model.task_name
                                   })
            du_model.update_no_placeholder.update({"subject": f'CONCAT(`subject`,";{his}")'})
            result.next_dicts.update_list.append(du_model)

        result.next_dicts.insert.append(di_model_next)
    return result


def cleanSemicolon(text):
    """Canonicalize semicolon-separated text.

    Converts full-width '；' to ';', removes whitespace touching a semicolon,
    collapses repeated semicolons, and trims leading/trailing semicolons and
    whitespace.
    """
    cleaned = text.replace('；', ';')
    # One pass eats whitespace adjacent to each semicolon, a second collapses
    # any remaining ';;' runs to a single separator.
    cleaned = re.sub(r"\s*;\s*", ";", cleaned)
    cleaned = re.sub(r";+", ";", cleaned)
    return cleaned.strip(";").strip()


def checkExist(obj):
    """Return True when *obj* is neither None nor empty.

    Works for anything supporting ``len()`` (str, list, SelectorList, ...).
    Uses identity comparison with None instead of the previous ``!= None``.
    """
    return obj is not None and len(obj) > 0


def chaoxingjournal_chaoxingarticle_etl_callback(callmodel) -> EtlDealModel:
    """ETL one crawled chaoxing article into a ``journal_latest`` record.

    Merges the stored row (``sql_model``), journal metadata
    (``journal_info``), the queued article fields (``article_info_json``)
    and the downloaded HTML into a flat ``data`` dict, then reports SUCCESS
    only when both ``rawid`` and ``title`` were resolved.

    Returns:
        EtlDealModel with status/err_msg/code set and ``save_data`` holding
        one ``{"table": "journal_latest", "data": ...}`` entry.
    """
    result = EtlDealModel()
    para_dicts = callmodel.para_dicts
    if "status" in para_dicts.keys() and para_dicts["status"] == "FAILED":
        # Upstream rule engine already failed: propagate with code 7.
        result.status = "FAILED"
        result.code = 7
        result.err_msg = "规则解析错误" + str(para_dicts)
        return result
    down_model = callmodel.down_model.down_dict
    sql_model = callmodel.sql_model.get_dict()
    journal_info = sql_model["journal_info"]
    article_info = json.loads(sql_model["article_info_json"])
    src_data = down_model["1_1"]

    rawid_alt = article_info.get("rawid_alt", "")
    pub_year = article_info.get("date", "")
    pub_date = pub_year + "0000"  # only the year is known; month/day padded
    issn = sql_model.get("issn", "")
    if not checkExist(issn):
        issn = journal_info.get("issn", "")
    vol = article_info.get("volume", "").replace("Vol.", "")
    num = article_info.get("issue", "")
    num = re.sub(r"第(?P<name>\w+)期", r"\g<1>", num).replace("No.", "")
    title = article_info.get("title", "")
    journal_name = article_info.get("source", "")
    if not checkExist(journal_name):
        journal_name = cleanSemicolon(journal_info.get("journal_name", ""))
    publisher = journal_info.get("publisher", "")
    cnno = article_info.get("cnno", "")
    # BUGFIX: the condition was inverted ("if checkExist(cnno)"), which
    # overwrote a present cnno and left a missing one empty; now it follows
    # the same fallback pattern as issn / journal_name above.
    if not checkExist(cnno):
        cnno = journal_info.get("cnno", "")
    subject = article_info.get("provider_subject", "")
    journal_raw_id = article_info.get("journal_rawid", "")
    data = {}
    down_date = src_data.down_date.split(" ")[0].replace("-", "")
    data["down_date"] = down_date
    data["latest_date"] = down_date
    data["batch"] = time.strftime("%Y%m%d_%H%M%S", time.localtime(time.time()))
    sub_db_id = "00006"
    rawid = sql_model["rawid"]
    data["rawid_mysql"] = rawid
    data["rawid"] = rawid
    data["rawid_alt"] = rawid_alt
    lngid = BaseLngid().GetLngid(sub_db_id, rawid)
    data["is_deprecated"] = "0"
    data["lngid"] = lngid
    data["keyid"] = lngid
    data["product"] = "CHAOXING"
    data["sub_db"] = "QK"
    data["provider"] = "CHAOXING"
    data["sub_db_id"] = sub_db_id
    data["source_type"] = "3"
    data["provider_url"] = "https://qikan.chaoxing.com/" + rawid_alt
    data["publisher"] = publisher
    data["pub_date"] = pub_date
    data["pub_year"] = pub_year
    data["subject"] = cleanSemicolon(subject)
    data["journal_raw_id"] = journal_raw_id
    data["journal_name"] = journal_name
    data["journal_name_alt"] = journal_info.get("journal_name_alt", "")
    data["issn"] = issn
    data["cnno"] = cnno
    data["vol"] = vol
    data["num"] = num
    data["country"] = "CN"
    data["language"] = "ZH"
    html = src_data.html
    if checkExist(html):
        # Collapse runs of angle brackets that would confuse the HTML parser.
        html = re.sub(r"<{2,}", "&lt;", html)
        html = re.sub(r">{2,}", "&gt;", html)
        sel = Selector(html)
        titleTag = sel.xpath("//div[contains(@class,'F_mainright')]/h1")
        abstract_ = ""
        author = ""
        if checkExist(titleTag):
            titleTag.xpath("./sup").remove()
            title = titleTag.xpath("./text()").get("")
            # NOTE(review): xpath("/") here looks odd — presumably detects a
            # multi-part title layout; confirm against real pages.
            if len(titleTag.xpath("/")) > 1:
                title = sel.xpath("//div[@id='FtextCon']/h1/text()").get("")
                abstract_ = sel.xpath("//div[@id='FtextCon']/p[@class='abstract']/text()").get("")
        author = sel.xpath("//div[contains(@class,'F_mainright')]/p/text()").get("")
        data["abstract"] = cleanSemicolon(abstract_)
        journal_name_alt = ""
        aTags = sel.xpath("//div[contains(@class,'sTopImg')]/p/a/text()")
        if checkExist(aTags) and len(aTags) == 3:
            # Middle breadcrumb entry is the alternate journal name.
            journal_name_alt = cleanSemicolon(aTags[1].get())
            if journal_name == journal_name_alt:
                journal_name_alt = ""
        data["journal_name_alt"] = journal_name_alt
        trTag = sel.xpath("//div[contains(@class,'Fmian1')]/table/tr")
        if checkExist(trTag):
            jump_page = ""
            begin_page = ""
            end_page = ""
            clc_no = ""
            clc = ""
            keyword = ""
            fund = ""
            cited_cnt = ""
            organ = ""
            for tr in trTag:
                centerTag = tr.xpath("./td[contains(@align,'center')]")
                leftTag = tr.xpath("./td[contains(@align,'left')]")
                if not checkExist(centerTag) or not checkExist(leftTag):
                    continue
                centerTag = centerTag.xpath("string(.)").get("")
                leftTag = leftTag.xpath("string(.)").get("")
                if "作者机构" in centerTag:
                    organ = leftTag
                elif "来    源" in centerTag:
                    # Page ranges come in three layouts: PN页, PN-M页, PN-M，K页.
                    pagetype0 = re.findall(r"P(\d+)页", leftTag)
                    pagetype1 = re.findall(r"P(\d+)-(\d+)页", leftTag)
                    pagetype2 = re.findall(r"P(\d+)-(\d+)，(\d+)页", leftTag)
                    if checkExist(pagetype0):
                        begin_page = pagetype0[0]
                        end_page = pagetype0[0]
                    elif checkExist(pagetype1):
                        begin_page = pagetype1[0][0]
                        end_page = pagetype1[0][1]
                    elif checkExist(pagetype2):
                        begin_page = pagetype2[0][0]
                        end_page = pagetype2[0][1]
                        jump_page = pagetype2[0][2]
                    else:
                        sTopImg = sel.xpath("//div[contains(@class,'sTopImg')]/text()").get("")
                        pagetype3 = re.findall(r"(\d+)-(\d+)页", sTopImg)
                        # BUGFIX: findall with two groups returns [(begin, end)]
                        # tuples; the old `len(pagetype3) == 2` compared the
                        # match count and assigned a tuple to begin_page.
                        if pagetype3:
                            begin_page, end_page = pagetype3[0]
                elif "分 类 号" in centerTag:
                    clc_no = leftTag.replace("；", ";").replace("|", "")
                elif "分类导航" in centerTag:
                    clc = leftTag.replace("->", ";")
                elif "关 键 词" in centerTag:
                    keyword = re.sub(r" +", ";", cleanSemicolon(leftTag))
                elif "基    金" in centerTag:
                    fund = leftTag.replace("；", ";")
                elif "摘    要" in centerTag:
                    data["abstract"] = cleanSemicolon(leftTag)
                elif "统计数据" in centerTag:
                    if "被引量：" in leftTag:
                        # BUGFIX: the old pattern used "\\d" inside a raw string
                        # (matching a literal backslash, never the digits) and
                        # bracketed the result, so isdigit() could never pass.
                        cited_cnt = re.sub(r".*被引量：(\d+).*", r"\g<1>", cleanSemicolon(leftTag))
                        if cited_cnt.isdigit():
                            cited_cnt = "{}@{}".format(cited_cnt, down_date)
            data["jump_page"] = cleanSemicolon(jump_page)
            data["begin_page"] = cleanSemicolon(begin_page)
            data["end_page"] = cleanSemicolon(end_page)
            data["clc_no"] = cleanSemicolon(clc_no)
            data["clc"] = cleanSemicolon(clc)
            data["keyword"] = cleanSemicolon(keyword)
            data["organ"] = cleanSemicolon(organ)
            data["fund"] = cleanSemicolon(fund)
            data["cited_cnt"] = cleanSemicolon(cited_cnt)
        # Strip superscript/subscript/image/span markup left inside the text.
        title = re.sub(r"<sup>.*?</sup>", "", title)
        title = re.sub(r"<sub>(.*?)</sub>", r"[\g<1>]", title)
        title = re.sub(r"<img.*?>", "", title)
        title = re.sub(r"<SUB>(.*?)</SUB>", r"[\g<1>]", title)
        title = re.sub(r"<IMG.*?>", "", title)
        title = re.sub(r"<SUP>(.*?)</SUP>", r"[\g<1>]", title)
        title = re.sub(r"<span.*?>", "", title).replace("</span>", "")
        author = re.sub(r"<sup>.*?</sup>", "", author)
        author = re.sub(r"<sub>(.*?)</sub>", r"[\g<1>]", author).replace("●", "").replace("，", ";").replace("■ ", "")
        data["title"] = cleanSemicolon(title)
        data["author"] = cleanSemicolon(author)
    save_data = []
    save_data.append({"table": "journal_latest", "data": data})
    result.save_data = save_data
    status = "FAILED"
    err_msg = ""
    code = 7
    if len(data["rawid"]) < 1:
        err_msg = "chaoxingjournal_chaoxingarticle_etl_callback 解析rawid出错"
    elif len(data.get("title", "")) < 1:
        # .get: data["title"] is only set when HTML was downloaded; the old
        # direct lookup raised KeyError for an empty download.
        err_msg = "chaoxingjournal_chaoxingarticle_etl_callback 解析title出错"
    else:
        status = "SUCCESS"
    result.status = status
    result.err_msg = err_msg
    result.code = code
    return result
