import re
from pprint import pprint

from fastapi import APIRouter
from re_common.baselibrary.tools.myparsel import MParsel
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.baselibrary.utils.core.requests_core import MsgCode
from re_common.vip.baseencodeid import BaseLngid

from apps.allsubdb.paper_sciencenet.models import ZGKXBParse4
from apps.core.callback import default_call_back
from apps.core.global_model import ParseHtmlModel
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo

# Shared router for all parse endpoints in this module; ContextIncludedRoute
# is the project's custom route class (request-context handling lives there).
router = APIRouter(route_class=ContextIncludedRoute)


def _split_postback(js_href: str) -> list:
    """Split an ASP.NET ``javascript:__doPostBack('target','argument')`` href
    into its comma-separated pieces, stripping the wrapper call, parentheses
    and quotes.

    :param js_href: raw ``href`` attribute value from the calendar HTML
    :return: list of postback pieces, e.g. ``['Calendar1', '1234']``
    """
    return (js_href.replace("javascript:__doPostBack(", "")
            .replace(")", "")
            .replace("'", "")
            .split(","))


@router.post("/step1/parse_get_days_list")
async def parse_days_list(input: InputInfoModel[ParseHtmlModel]):
    """Parse the month-calendar page: extract the ``__VIEWSTATE`` value plus,
    per month, the postback arguments for the previous/next-month links and
    for every clickable day cell.

    :param input: wrapper whose ``data.html`` carries the calendar page HTML
    :return: serialized ``ReturnInfo`` dict; parsed structure under
        ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    css_dicts = {
        'days_month': {
            'viewstate': 'input[id="__VIEWSTATE"]::attr(value)',
            'parent': 'table[id="Calendar1"]',  # parse root
            'children': {
                'before': 'a[title="转到上一个月"]::attr(href)',
                'after': 'a[title="转到下一个月"]::attr(href)',
                'now_time': 'tr > td > table[cellspacing="0"] > tr > td[align="center"]::text',
                'days': {
                    'parent': 'tr > td > a',
                    'children': {
                        "title": 'a::attr(title)',
                        "text": 'a::text',
                        "href": 'a::attr(href)',
                    }
                }

            }
        }
    }

    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)
    for item in new_dict["days_month"]["children"]:  # one element per parsed month
        item["before"] = _split_postback(item["before"])
        item["after"] = _split_postback(item["after"])
        # e.g. "2023年5月" -> "2023-5"
        item["now_time"] = item["now_time"].replace("月", "").replace("年", "-")
        for sub_item in item["days"]["children"]:
            sub_item["href"] = _split_postback(sub_item["href"])

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": new_dict}
    return return_info.todict()


@router.post("/step2/parse_get_article_list")
async def parse_get_article_list(input: InputInfoModel[ParseHtmlModel]):
    """Parse one day's page into the list of article URLs/titles; the plate
    (版) information is on the same page, so no extra request is needed.

    :param input: wrapper whose ``data.html`` carries the day page HTML
    :return: serialized ``ReturnInfo`` dict; ``data`` is a flat list of
        article dicts (url/title/author plus plate and position fields)
    """
    return_info = ReturnInfo()
    html = input.data.html
    # str.find returns -1 when absent; the previous `> 0` test would wrongly
    # skip this branch if the marker ever appeared at index 0.
    if html.find("当日无报纸出版") != -1:
        return_info.status = bools_string(True)
        return_info.msg_code = MsgCode.PARE_NO_DATA
        return_info.msg = "当日无报纸出版"
        return_info.data = []
        return return_info.todict()

    xpath_dicts = {
        'parent': '//*[@id="main"]/table/tr[2]/td[1]/table/tr',  # parse root
        'children': {
            "plate": './td[1]/table/tr/td/text()',
            "parent": './td[1]/div/table/tr',
            "children": {
                "url": './td[1]/a/@href',
                "title": './td[1]/a/text()',
                "author": './td[2]/text()',
            }
        }
    }

    # Matches a plate label prefix such as "第A1版 " at the start of the text.
    rule = re.compile(r"^[第A-Z][0-9-]*[版 ]*")
    mc = MParsel(html=html)
    new_dict = mc.xpath_parsel(sel=mc.sel, xpath_selector=xpath_dicts)
    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)

    lists = new_dict["children"]
    result_list = []
    # Rows come in (plate row, article row) pairs, so the count must be even.
    if len(lists) % 2 != 0:
        return_info.status = bools_string(True)
        return_info.msg_code = MsgCode.PARE_STRUCTURE_ERROR
        return_info.msg = "理论上解析出来版和数据一一对应，应该为双数，但出现单数情况，请检查后修复"
        return_info.data = {"result": new_dict}
        return return_info.todict()
    for i in range(0, len(lists), 2):
        palte_item, article_item = lists[i: i + 2]
        palte = palte_item["plate"]  # plate (版) heading text
        matches = rule.findall(palte)  # computed once, reused below
        palte_num = matches[0].replace("第", '').replace("版", '') if matches else ''
        palte_title = rule.sub("", palte).strip()
        for index, item in enumerate(article_item["children"]):
            # NOTE: "palte_*" key spellings are kept as-is — downstream steps
            # (step4 handlers) read them under these exact names.
            item["palte_num"] = palte_num.strip()
            item["palte_title"] = palte_title.strip()
            item["author"] = item["author"].replace("文/图", "").strip()
            item["version_num"] = str(i // 2 + 1)  # 1-based plate ordinal
            item["index"] = str(index + 1)  # 1-based position within the plate
            result_list.append(item)

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = result_list
    return return_info.todict()


def call_back(key, value):
    """Default parse callback: normalize a raw selector result to a string.

    When *value* is a list, each element is cleaned (carriage returns,
    newlines and non-breaking spaces removed, surrounding whitespace
    stripped) and the non-empty pieces are concatenated, each followed by a
    newline. Scalar values pass through unchanged. Trailing semicolons are
    stripped from the final result either way.

    :param key: selector key (unused; kept for the callback signature)
    :param value: raw parsed value — a list of strings or a scalar
    :return: the normalized string
    """
    if isinstance(value, list):
        cleaned = (
            part.replace("\r", "").replace("\n", "").replace("\xa0", "").strip()
            for part in value
        )
        result = "".join(piece + "\n" for piece in cleaned if piece)
    else:
        result = value
    return result.rstrip(";")


@router.post("/step4_1/parse_article_detail")
async def parse_article_one(input: InputInfoModel[ZGKXBParse4]):
    """Parse an article detail page (layout variant 1) into the metadata
    record used for downstream ref/rel downloading.

    :param input: wrapper with ``data.html`` (detail page), ``data.jsondicts``
        (fields produced by step2), ``data.downdate``, ``data.rawid`` and
        ``data.batch``
    :return: serialized ``ReturnInfo`` dict; the record under
        ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch
    css_dicts = {
        'bzinfo': {
            'parent': 'table[align="center"] tr:nth-of-type(2) td[valign="top"]:nth-of-type(1)',  # parse root
            'children': {
                "title_catalyst": 'tr:nth-of-type(1) td[valign="middle"] b::text',
                "title": 'td[valign="bottom"] *::text',
                "title_alt": 'tr:nth-of-type(3) td[valign="middle"] b::text',
                "authors": "tr:nth-of-type(5) table tr td:nth-of-type(1)::text",
                'p_info': '#content1 div *::text',  # body text

            }
        }
    }
    rule = re.compile(r"[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}")  # yyyy-m-d in the byline
    rule_as = re.compile(r"来源.*\s*.*")  # "source ..." tail to drop from authors
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, call_back)
    new_one_dict = new_dict["bzinfo"]["children"][0]
    # Prefer the date embedded in the byline; fall back to the day passed
    # through from the list step. The record is otherwise identical, so build
    # it once instead of duplicating the whole dict per branch.
    pub_date_forward = rule.findall(new_one_dict["authors"])
    pub_date = pub_date_forward[0] if pub_date_forward else getdicts["days"]
    dic = {
        "lngid": BaseLngid().GetLngid("00378", rawid),
        "sub_db_id": "00378",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "中国科学报",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': new_one_dict["title_catalyst"],
        'title_alt': new_one_dict["title_alt"],
        "author": rule_as.sub('', new_one_dict["authors"]).strip(),
        "abstract": new_one_dict["p_info"],
        'pub_year': pub_date[0:4],
        'pub_date': pub_date,
        'meeting_counts': getdicts["palte_num"],
        'meeting_counts_as': getdicts["version_num"].zfill(2),
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'sciencenetpaper',
        "provider": 'SCIENCENET',
        "product": 'NEWSSCIENCENET',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://news.sciencenet.cn/dz/dznews_photo.aspx",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()


@router.post("/step4_2/parse_article_detail")
async def parse_article_one(input: InputInfoModel[ZGKXBParse4]):
    """Parse an article detail page (layout variant 2) into the metadata
    record used for downstream ref/rel downloading.

    NOTE(review): this handler reuses the step4_1 handler's function name, so
    the module attribute is shadowed; both routes still work because FastAPI
    registers them at decoration time. Consider renaming for clarity.

    :param input: wrapper with ``data.html`` (detail page), ``data.jsondicts``
        (fields produced by step2), ``data.downdate``, ``data.rawid`` and
        ``data.batch``
    :return: serialized ``ReturnInfo`` dict; the record under
        ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch
    css_dicts = {
        'bzinfo': {
            'parent': 'div #content',  # parse root
            'children': {
                "title_catalyst": '#content1 table tr:nth-of-type(2) td *::text',
                "title": '#content1 table tr:nth-of-type(3) td *::text',
                "title_alt": '#content1 table tr:nth-of-type(4) td *::text',
                "authors": "tr:nth-of-type(1) td div:nth-of-type(1)::text",
                'p_info': '#content1 p *::text,#content1>div::text',  # body text

            }
        }
    }
    rule = re.compile(r"[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}")  # yyyy-m-d in the byline
    rule_as = re.compile(r"来源.*\s*.*")  # "source ..." tail to drop from authors
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, call_back)
    new_one_dict = new_dict["bzinfo"]["children"][0]
    # Prefer the date embedded in the byline; fall back to the day passed
    # through from the list step. The record is otherwise identical, so build
    # it once instead of duplicating the whole dict per branch.
    pub_date_forward = rule.findall(new_one_dict["authors"])
    pub_date = pub_date_forward[0] if pub_date_forward else getdicts["days"]
    dic = {
        "lngid": BaseLngid().GetLngid("00378", rawid),
        "sub_db_id": "00378",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "中国科学报",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': new_one_dict["title_catalyst"],
        'title_alt': new_one_dict["title_alt"],
        "author": rule_as.sub('', new_one_dict["authors"]).strip(),
        "abstract": new_one_dict["p_info"],
        'pub_year': pub_date[0:4],
        'pub_date': pub_date,
        'meeting_counts': getdicts["palte_num"],
        'meeting_counts_as': getdicts["version_num"].zfill(2),
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'sciencenetpaper',
        "provider": 'SCIENCENET',
        "product": 'NEWSSCIENCENET',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://news.sciencenet.cn/dz/dznews_photo.aspx",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()
