import re

from fastapi import APIRouter
from re_common.baselibrary.tools.myparsel import MParsel
from re_common.baselibrary.utils.basedict import BaseDicts

from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.vip.baseencodeid import BaseLngid

from apps.allsubdb.paper_renminribao_guonei.models import PaperRenMinRiBaoParseDays, \
    RenMinRiBaoParse2, RenMinRiBaoParse3, RenMinRiBaoParse4

from apps.core.callback import default_call_back
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo

# Router for the People's Daily (人民日报) parsing endpoints.
# ContextIncludedRoute comes from apps.core.m_route -- presumably it injects
# request context into each handler; confirm against that module.
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/parse_days")
async def parse_days(input: InputInfoModel[PaperRenMinRiBaoParseDays]):
    """
    Parse the period-list markup into per-issue records
    (``period_date`` / ``front_page``), used downstream for page downloads.

    :param input: request payload carrying the raw ``html`` to parse
    :return: serialized ReturnInfo dict; ``data`` is the first parsed group
    """
    selector_tree = {
        'bzinfo': {
            'parent': '//periodlist',  # root node of the issue listing
            'children': {
                'parent': './period',
                'children': {
                    "period_date": "./period_date/text()",
                    "front_page": "./front_page/text()"
                }

            }
        }
    }

    parser = MParsel(html=input.data.html)
    parsed = parser.xpath_parsel(sel=parser.sel, xpath_selector=selector_tree)
    # Normalize every leaf value in place via the shared default callback.
    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = parsed["bzinfo"]["children"][0]
    return result.todict()


@router.post("/step2/parse_page_1")
async def parse_article(input: InputInfoModel[RenMinRiBaoParse2]):
    """
    Parse an issue front page (``#pageList`` layout) into per-section records:
    normalized title, page number, cid, and the absolute section URL.

    NOTE(review): several handlers in this module share the name
    ``parse_article``; FastAPI registers each route at decoration time so the
    endpoints all work, but only the last definition is importable by name.

    :param input: payload with raw ``html`` and the issue date ``periodDate``
        (expected ``YYYYMMDD`` -- sliced below; confirm against caller)
    :return: serialized ReturnInfo dict; ``data`` is the first parsed group
    """
    html = input.data.html
    period_date = input.data.periodDate
    selector_tree = {

        'bzinfo': {
            'parent': '#pageList',  # root node of the section list
            'children': {
                'parent': 'div .right_title-name',
                'children': {
                    'versions_title': 'a::text',  # section title
                    'page_url': 'a::attr(href)',  # section page link
                }
            }
        }
    }

    page_pattern = re.compile(r"第[0-9]*版")
    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)

    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    # Invariant issue base URL, e.g. .../html/2021-01/01/
    issue_base = "http://paper.people.com.cn/rmrb/html/{}-{}/{}/".format(
        period_date[0:4], period_date[4:6], period_date[-2:])

    for group in parsed["bzinfo"]["children"]:  # outer groups from the selector tree
        for entry in group["children"]:
            raw_title = entry["versions_title"]
            title_rest = page_pattern.sub("", raw_title).strip()
            # "第01版" -> "01"; raises IndexError when the title has no page marker
            page = page_pattern.findall(raw_title)[0].replace("第", '').replace("版", '')
            # Rewrite the section title as "(NN)<rest>"
            entry["versions_title"] = "({})".format(page.zfill(2)) + title_rest
            entry["days"] = period_date
            link_tail = (entry['page_url']
                         .replace("./nbs.D110000renmrb_", '')
                         .replace("nbs.D110000renmrb_", '')
                         .replace(".htm", ''))
            entry["cid"] = period_date + link_tail
            entry["page"] = page
            entry["versions_url"] = issue_base + entry['page_url'].strip('./')

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = parsed["bzinfo"]["children"][0]
    return result.todict()


@router.post("/step2/parse_page_2")
async def parse_article(input: InputInfoModel[RenMinRiBaoParse2]):
    """
    Parse an issue front page (swiper layout variant) into per-section
    records: normalized title, page number, cid, and the absolute section URL.

    NOTE(review): the page-number pattern here is ``[0-9]*版`` (no leading
    ``第``), unlike step2/parse_page_1 -- presumably matching this layout's
    titles; the ``replace("第", '')`` is then a no-op kept for symmetry.

    :param input: payload with raw ``html`` and the issue date ``periodDate``
        (expected ``YYYYMMDD`` -- sliced below; confirm against caller)
    :return: serialized ReturnInfo dict; ``data`` is the first parsed group
    """
    html = input.data.html
    period_date = input.data.periodDate
    selector_tree = {

        'bzinfo': {
            'parent': 'div .swiper-container',  # root node of the section list
            'children': {
                'parent': '.swiper-slide',
                'children': {
                    'versions_title': 'a::text',  # section title
                    'page_url': 'a::attr(href)',  # section page link
                }
            }
        }
    }

    page_pattern = re.compile(r"[0-9]*版")
    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)

    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    # Invariant issue base URL, e.g. .../html/2021-01/01/
    issue_base = "http://paper.people.com.cn/rmrb/html/{}-{}/{}/".format(
        period_date[0:4], period_date[4:6], period_date[-2:])

    for group in parsed["bzinfo"]["children"]:  # outer groups from the selector tree
        for entry in group["children"]:
            raw_title = entry["versions_title"]
            title_rest = page_pattern.sub("", raw_title).strip()
            # "01版" -> "01"; raises IndexError when the title has no page marker
            page = page_pattern.findall(raw_title)[0].replace("第", '').replace("版", '')
            # Rewrite the section title as "(NN)<rest>"
            entry["versions_title"] = "({})".format(page.zfill(2)) + title_rest
            entry["days"] = period_date
            link_tail = (entry['page_url']
                         .replace("./nbs.D110000renmrb_", '')
                         .replace("nbs.D110000renmrb_", '')
                         .replace(".htm", ''))
            entry["cid"] = period_date + link_tail
            entry["page"] = page
            entry["versions_url"] = issue_base + entry['page_url'].strip('./')

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = parsed["bzinfo"]["children"][0]
    return result.todict()


@router.post("/step3/parse_page_1")
async def parse_article(input: InputInfoModel[RenMinRiBaoParse3]):
    """
    Parse a section page (``#titleList`` layout) into per-article records:
    rawid, cleaned title, issue date, and the absolute article URL.

    :param input: payload with raw ``html`` and the issue date ``periodDate``
        (expected ``YYYYMMDD`` -- sliced below; confirm against caller)
    :return: serialized ReturnInfo dict; ``data`` is the first parsed group
    """
    html = input.data.html
    period_date = input.data.periodDate
    selector_tree = {

        'bzinfo': {
            'parent': '#titleList',  # root node of the article list
            'children': {
                'parent': 'ul li',
                'children': {
                    'title_format': 'a script::text',  # raw title, wrapped in JS
                    'article_url_format': 'a::attr(href)',  # raw article link
                }
            }
        }
    }

    # Titles are emitted as document.write(view("...")); capture the payload.
    title_pattern = re.compile(r'document\.write\(view\(\"(.*?)\"\)\)')
    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)

    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    # Invariant issue base URL, e.g. .../html/2021-01/01/
    issue_base = "http://paper.people.com.cn/rmrb/html/{}-{}/{}/".format(
        period_date[0:4], period_date[4:6], period_date[-2:])

    for group in parsed["bzinfo"]["children"]:  # outer groups from the selector tree
        for entry in group["children"]:
            # hrefs may be ";"-joined; the real link is the last segment
            tail = entry["article_url_format"].split(";")[-1]
            entry["rawid"] = (tail
                              .replace("nw.D110000renmrb_", '')
                              .replace(".htm", '')
                              .replace("_", '')
                              .replace("-", ''))
            # Raises IndexError when the title is not in the document.write form
            entry["title"] = title_pattern.findall(entry["title_format"])[0].strip()
            entry["days"] = period_date
            entry["article_url"] = issue_base + tail

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = parsed["bzinfo"]["children"][0]
    return result.todict()


@router.post("/step3/parse_page_2")
async def parse_article(input: InputInfoModel[RenMinRiBaoParse3]):
    """
    Parse a section page (``div .news`` layout variant) into per-article
    records: rawid, title, issue date, and the absolute article URL.

    :param input: payload with raw ``html`` and the issue date ``periodDate``
        (expected ``YYYYMMDD`` -- sliced below; confirm against caller)
    :return: serialized ReturnInfo dict; ``data`` is the first parsed group
    """
    html = input.data.html
    period_date = input.data.periodDate
    selector_tree = {

        'bzinfo': {
            'parent': 'div .news',  # root node of the article list
            'children': {
                'parent': 'ul li',
                'children': {
                    'title': 'a::text',  # article title
                    'article_url_format': 'a::attr(href)',  # raw article link
                }
            }
        }
    }

    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)
    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    # Invariant issue base URL, e.g. .../html/2021-01/01/
    issue_base = "http://paper.people.com.cn/rmrb/html/{}-{}/{}/".format(
        period_date[0:4], period_date[4:6], period_date[-2:])

    for group in parsed["bzinfo"]["children"]:  # outer groups from the selector tree
        for entry in group["children"]:
            # hrefs may be ";"-joined; the real link is the last segment
            tail = entry["article_url_format"].split(";")[-1]
            entry["rawid"] = (tail
                              .replace("nw.D110000renmrb_", '')
                              .replace(".htm", '')
                              .replace("_", '')
                              .replace("-", ''))
            entry["days"] = period_date
            entry["article_url"] = issue_base + tail

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = parsed["bzinfo"]["children"][0]
    return result.todict()


def call_back(key, value):
    """
    Parse callback used by the step4 handlers to flatten a parsed value.

    A list value is cleaned element-wise (CR/LF and non-breaking spaces
    removed, then stripped), empty elements are dropped, and the survivors
    are joined with a trailing "\\n" each. A non-list value is passed
    through unchanged. Trailing semicolons are stripped from the result.

    NOTE(review): the trailing "\\n" in the list branch means rstrip(";")
    only ever fires on non-list values -- preserved as-is.

    :param key: dict key being processed (unused here)
    :param value: the parsed leaf value (list of strings, or a string)
    :return: the flattened string
    """
    if isinstance(value, list):
        kept = []
        for item in value:
            item = item.replace("\r", "").replace("\n", "").replace(u"\xa0", "").strip()
            if item:
                kept.append(item + "\n")
        flattened = "".join(kept)
    else:
        flattened = value
    return flattened.rstrip(";")





@router.post("/step4_1/parse_article_detail")
async def parse_article_one(input: InputInfoModel[RenMinRiBaoParse4]):
    """
    Parse a single article page (``div .text_c`` layout) into the final
    record dict (lngid, titles, author, body, publication metadata).

    :param input: payload with raw ``html``, issue metadata ``jsondicts``
        (expects keys url/years/periodDate/page/index), ``downdate``,
        ``rawid`` and ``batch``
    :return: serialized ReturnInfo dict; ``data`` is ``{"result": record}``
    """
    data = input.data
    selector_tree = {
        'bzinfo': {
            'parent': 'div .text_c',  # article container
            'children': {
                "title_catalyst": 'h3::text',
                "title": 'h1::text',
                "title_alt": 'h2::text',
                "authors": "h4::text",
                'p_info': '#ozoom p *::text',  # article body text

            }
        }
    }

    parser = MParsel(html=data.html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)

    # Flatten leaf lists into newline-joined strings via the local call_back.
    BaseDicts.get_recursive_dict(parsed, None, call_back)
    record = parsed["bzinfo"]["children"][0]

    meta = data.jsondicts
    article = {
        "lngid": BaseLngid().GetLngid("00353", data.rawid),
        "sub_db_id": "00353",
        "batch": data.batch,
        "rawid": data.rawid,
        "source_type": '11',
        "journal_name": "人民日报",
        'provider_url': meta["url"],
        'title': record["title"],
        'title_catalyst': record["title_catalyst"],
        'title_alt': record["title_alt"],
        "author": record["authors"],
        "abstract": record["p_info"],
        'pub_year': meta["years"],
        'pub_date': meta["periodDate"],
        'meeting_counts': meta["page"],
        'index': meta["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'rmrbpaper',
        "provider": 'PEOPLECN',
        "product": 'RMRB',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://paper.people.com.cn/rmrb/paperindex.htm",
        'down_date': data.downdate
    }

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = {"result": article}
    return result.todict()


@router.post("/step4_2/parse_article_detail")
async def parse_article_one(input: InputInfoModel[RenMinRiBaoParse4]):
    """
    Parse a single article page (``div .article`` layout variant) into the
    final record dict. Unlike step4_1, the author line comes from a
    paragraph directly under the container, selected at the group level.

    :param input: payload with raw ``html``, issue metadata ``jsondicts``
        (expects keys url/years/periodDate/page/index), ``downdate``,
        ``rawid`` and ``batch``
    :return: serialized ReturnInfo dict; ``data`` is ``{"result": record}``
    """
    data = input.data
    selector_tree = {
        'bzinfo': {
            'parent': 'div .article',  # article container
            'authors': 'div .article >p::text',  # author line (group-level field)
            'children': {
                "title_catalyst": 'h3::text',
                "title": 'h1::text',
                "title_alt": 'h2::text',
                'p_info': '#ozoom p *::text',  # article body text

            }
        }
    }

    parser = MParsel(html=data.html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=selector_tree)

    # Flatten leaf lists into newline-joined strings via the local call_back.
    BaseDicts.get_recursive_dict(parsed, None, call_back)
    record = parsed["bzinfo"]["children"][0]

    meta = data.jsondicts
    article = {
        "lngid": BaseLngid().GetLngid("00353", data.rawid),
        "sub_db_id": "00353",
        "batch": data.batch,
        "rawid": data.rawid,
        "source_type": '11',
        "journal_name": "人民日报",
        'provider_url': meta["url"],
        'title': record["title"],
        'title_catalyst': record["title_catalyst"],
        'title_alt': record["title_alt"],
        "author": parsed["bzinfo"]["authors"],
        "abstract": record["p_info"],
        'pub_year': meta["years"],
        'pub_date': meta["periodDate"],
        'meeting_counts': meta["page"],
        'index': meta["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'rmrbpaper',
        "provider": 'PEOPLECN',
        "product": 'RMRB',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://paper.people.com.cn/rmrb/paperindex.htm",
        'down_date': data.downdate
    }

    result = ReturnInfo()
    result.status = bools_string(True)
    result.msg_code = 200
    result.msg = ""
    result.data = {"result": article}
    return result.todict()
