import re
from ast import literal_eval

from fastapi import APIRouter
from re_common.baselibrary.tools.myparsel import MParsel
from re_common.baselibrary.utils.basedict import BaseDicts

from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.vip.baseencodeid import BaseLngid

from apps.allsubdb.paper_xinhuameiridianxun.models import PaperXinHuaMeiRiDianXunParseDays, \
    XinHuaMeiRiDianXunParse2, XinHuaMeiRiDianXunParse3, XinHuaMeiRiDianXunParse4

from apps.core.callback import default_call_back
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo

# Router shared by every endpoint in this module; ContextIncludedRoute
# presumably attaches request context to each handler — confirm in
# apps.core.m_route.
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/parse_days")
async def parse_days(input: InputInfoModel[PaperXinHuaMeiRiDianXunParseDays]):
    """
    Parse the JS snippet listing every available paper date.

    The incoming html is expected to look like ``var allpaperdate=[...];``;
    the assignment prefix and the trailing semicolon are stripped and the
    remaining literal is evaluated into a Python object.
    :param input: request payload carrying the raw html snippet
    :return: serialized ReturnInfo dict whose ``data`` is the date list
    """
    ret = ReturnInfo()
    literal_text = input.data.html.replace("var allpaperdate=", '').strip().rstrip(";")
    ret.data = literal_eval(literal_text)
    ret.status = bools_string(True)
    ret.msg_code = 200
    ret.msg = ""
    return ret.todict()


@router.post("/step2/parse_page_1")
async def parse_page_1(input: InputInfoModel[XinHuaMeiRiDianXunParse2]):
    """
    Parse the page-index html (layout variant 1) into per-section records.

    Each section entry gets a normalized ``versions_title`` like ``(02)要闻``
    plus ``days``/``cid``/``page``/``versions_url`` fields derived from the
    issue date and the ``Page...`` link.
    :param input: request payload with the raw html and the issue date
    :return: serialized ReturnInfo dict; ``data`` is the first section group
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    # Selector tree consumed by MParsel: one record per <h4> under .listdaohang.
    css_dicts = {
        'bzinfo': {
            'parent': '.listdaohang',  # parse root
            'children': {
                'parent': 'h4',
                'children': {
                    'versions_title': 'a[href^="Page"]::text',  # section title text
                    'page_url': 'a[href^="Page"]::attr(href)',  # section page link
                }
            }
        }
    }

    # Matches the leading "NN版" (or "NN-NN版") prefix of a section title.
    rule = re.compile(r"^[0-9-]*版")
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)

    for dic_list in new_dict["bzinfo"]["children"]:  # each outer record
        for dic_1 in dic_list["children"]:
            banmiantitle = dic_1["versions_title"]
            # e.g. "02版 ：要闻" -> forward title "要闻", page "02"
            versions_title_forward = rule.sub("", banmiantitle).replace('：', '').replace(":", '').strip()
            page = rule.findall(banmiantitle)[0].replace("第", '').replace("版", '')
            versions_title = "({})".format(page.zfill(2)) + versions_title_forward
            dic_1["versions_title"] = versions_title
            dic_1["days"] = periodDate
            # cid = issue date + numeric part of the page link
            # (e.g. "Page01DK.htm" -> "01")
            url_last = dic_1['page_url'].replace(".htm", '').replace("DK", '').replace("Page", '')
            dic_1["cid"] = periodDate + url_last
            dic_1["page"] = page.zfill(2)
            dic_1["versions_url"] = "http://mrdx.cn/content/{}/".format(periodDate) + dic_1['page_url']

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"][0]
    return return_info.todict()

@router.post("/step2/parse_page_0")
async def parse_page_0(input: InputInfoModel[XinHuaMeiRiDianXunParse2]):
    """
    Parse the page-index html (layout variant 0: ``.pageto`` anchors).

    Unlike variant 1 there is no per-section link, so ``cid`` is built from
    the zero-padded page number and ``versions_url`` always points at the
    fixed ``PageArticleIndexLB.htm`` index page.
    :param input: request payload with the raw html and the issue date
    :return: serialized ReturnInfo dict; ``data`` is the first section group
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {
        'bzinfo': {
            'parent': '.listdaohang',  # parse root
            'children': {
                'parent': 'h4',
                'children': {
                    'versions_title': '.pageto::text',  # section title text
                }
            }
        }
    }

    # Matches the leading "NN版" (or "NN-NN版") prefix of a section title.
    rule = re.compile(r"^[0-9-]*版")
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)

    for dic_list in new_dict["bzinfo"]["children"]:  # each outer record
        for dic_1 in dic_list["children"]:
            banmiantitle = dic_1["versions_title"]
            # e.g. "02版 ：要闻" -> forward title "要闻", page "02"
            versions_title_forward = rule.sub("", banmiantitle).replace('：', '').replace(":", '').strip()
            page = rule.findall(banmiantitle)[0].replace("第", '').replace("版", '')
            versions_title = "({})".format(page.zfill(2)) + versions_title_forward
            dic_1["versions_title"] = versions_title
            dic_1["days"] = periodDate
            dic_1["cid"] = periodDate + page.zfill(2)
            dic_1["page"] = page.zfill(2)
            dic_1["versions_url"] = "http://mrdx.cn/content/{}/".format(periodDate) + "PageArticleIndexLB.htm"

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"][0]
    return return_info.todict()




@router.post("/step2/parse_page_2")
async def parse_page_2(input: InputInfoModel[XinHuaMeiRiDianXunParse2]):
    """
    Parse the page-index html (layout variant 2: table-based ``atitle`` links).

    Titles in this layout are prefixed with ``NN:`` instead of ``NN版``, and
    the page number is NOT zero-padded here (kept as the source emits it).
    :param input: request payload with the raw html and the issue date
    :return: serialized ReturnInfo dict; ``data`` is the full section list
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {
        'bzinfo': {
            'parent': 'td[valign="top"] a[href^="Page"][class="atitle"]',  # parse root
            'children': {
                "versions_title_forward": 'a::text',  # section title text
                'page_url': 'a::attr(href)',  # section page link
            }
        }
    }

    # Matches the leading "NN:" (or "NN-NN:") prefix of a section title.
    rule = re.compile(r"^[0-9-]*:")
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)
    for dic_1 in new_dict["bzinfo"]["children"]:  # flat list in this layout
        banmiantitle = dic_1["versions_title_forward"]
        versions_title_forward = rule.sub("", banmiantitle).strip()
        page = rule.findall(banmiantitle)[0].replace("第", '').replace("版", '').replace(":", '')
        versions_title = "({})".format(page) + versions_title_forward
        dic_1["versions_title"] = versions_title
        dic_1["days"] = periodDate
        # cid = issue date + numeric part of the page link
        # (e.g. "Page01HO.htm" -> "01")
        url_last = dic_1['page_url'].replace(".htm", '').replace("HO", '').replace("Page", '')
        dic_1["cid"] = periodDate + url_last
        dic_1["page"] = page
        dic_1["versions_url"] = "http://mrdx.cn/content/{}/".format(periodDate) + dic_1['page_url']

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"]
    return return_info.todict()


@router.post("/step3/parse_page_1")
async def parse_article_list_1(input: InputInfoModel[XinHuaMeiRiDianXunParse3]):
    """
    Parse a section index page (layout variant 1) into its article list.

    Article anchors are matched through their ``alt`` attribute (the page
    number); each entry gets ``rawid``, ``days`` and an absolute
    ``article_url``.
    :param input: request payload with html, issue date and page number
    :return: serialized ReturnInfo dict; ``data`` is the first article group
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    page = input.data.page
    css_dicts = {
        'bzinfo': {
            'parent': '.listdaohang',  # parse root
            'children': {
                'parent': 'ul li a[alt="{}"]'.format(page),
                'children': {
                    'title': 'a::text',  # article title
                    'article_url_format': 'a::attr(daoxiang)',  # article url(s)
                }
            }
        }
    }

    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)

    for dic_list in new_dict["bzinfo"]["children"]:  # each outer record
        for dic_1 in dic_list["children"]:
            # The attribute may hold several ';'-separated urls; the last wins.
            last_url = dic_1["article_url_format"].split(";")[-1]
            # rawid = issue date + numeric part of the article link
            # (e.g. "Articel01NU.htm" -> "01")
            last_url_after = last_url.replace("Articel", '').replace(".htm", '').replace("NU", '')
            dic_1["rawid"] = periodDate + last_url_after
            dic_1["days"] = periodDate
            dic_1["article_url"] = "http://mrdx.cn/content/{}/".format(periodDate) + last_url
    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"][0]
    return return_info.todict()


@router.post("/step3/parse_page_2")
async def parse_article_list_2(input: InputInfoModel[XinHuaMeiRiDianXunParse3]):
    """
    Parse a section index page (layout variant 2, table-based) into its
    article list.

    Unlike variant 1 the anchors carry an ``href`` (``Articel...BB.htm``)
    instead of a ``daoxiang`` attribute, and the whole flat list is returned.
    :param input: request payload with html, issue date and page number
    :return: serialized ReturnInfo dict; ``data`` is the full article list
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    page = input.data.page
    css_dicts = {
        'bzinfo': {
            'parent': 'td[valign="top"] a[href^="Articel"][class="atitle"]',  # parse root
            'children': {
                'title': 'a::text',  # article title
                'article_url_format': 'a::attr(href)',  # article url(s)
            }
        }
    }

    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)
    for dic_1 in new_dict["bzinfo"]["children"]:  # flat list in this layout
        # The attribute may hold several ';'-separated urls; the last wins.
        last_url = dic_1["article_url_format"].split(";")[-1]
        # rawid = issue date + numeric part of the article link
        # (e.g. "Articel01BB.htm" -> "01")
        dic_1["rawid"] = periodDate + last_url.replace("Articel", '').replace(".htm", '').replace("BB", '')
        dic_1["days"] = periodDate
        dic_1["article_url"] = "http://mrdx.cn/content/{}/".format(periodDate) + last_url
    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"]
    return return_info.todict()


def call_back(key, value):
    """
    Parse callback handed to BaseDicts.get_recursive_dict.

    List values are cleaned (CR/LF and non-breaking spaces removed, then
    stripped) and the non-empty items concatenated, each followed by a
    newline.  Scalar values pass through unchanged.  Trailing semicolons
    are stripped from the final string in both cases.
    :param key: dict key being visited (unused)
    :param value: raw value — a list of text fragments or a scalar string
    :return: the normalized string
    """
    if isinstance(value, list):
        cleaned = (
            item.replace("\r", "").replace("\n", "").replace("\xa0", "").strip()
            for item in value
        )
        result = "".join(part + "\n" for part in cleaned if part != "")
    else:
        result = value
    return result.rstrip(";")


def judgement(strs_list, title):
    """
    Return the index of the first element of ``strs_list`` equal to ``title``.

    :param strs_list: list of strings to scan
    :param title: value to look for
    :return: zero-based index of the first match, or None when absent
    """
    try:
        # list.index already returns the first match; the hand-rolled
        # enumerate loop was redundant.  None on a miss is made explicit.
        return strs_list.index(title)
    except ValueError:
        return None


@router.post("/step4_1/parse_article_detail")
async def parse_article_detail_1(input: InputInfoModel[XinHuaMeiRiDianXunParse4]):
    """
    Parse an article-detail page (modern ``div.neirong`` layout) into the
    canonical record dict used downstream.
    :param input: request payload with html, article metadata (jsondicts),
        download date, rawid and batch
    :return: serialized ReturnInfo dict with the record under ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch
    css_dicts = {
        'bzinfo': {
            'parent': 'div.neirong',  # parse root
            'children': {
                "title_catalyst": '.bggray .margin15 h3::text',  # kicker title
                "title": '.bggray .margin15 h2::text',  # main title
                "title_alt": '.bggray .margin15 h4::text',  # sub title
                'p_info': '#contenttext>div *::text',  # body text
            }
        }
    }

    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    titles_info = new_dict["bzinfo"]["children"][0]
    # No author is extracted from this layout; the field stays empty.
    titles_info["authors"] = ''

    BaseDicts.get_recursive_dict(new_dict, None, call_back)
    new_one_dict = new_dict["bzinfo"]["children"][0]

    dic = {
        "lngid": BaseLngid().GetLngid("00356", rawid),
        "sub_db_id": "00356",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "新华每日电讯",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': new_one_dict["title_catalyst"],
        'title_alt': new_one_dict["title_alt"],
        "author": new_one_dict["authors"],
        "abstract": new_one_dict["p_info"],
        'pub_year': getdicts["years"],
        'pub_date': getdicts["periodDate"],
        'meeting_counts': getdicts["page"],
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'mrdxpaper',
        "provider": 'XINHUANET',
        "product": 'MRDX',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://www.mrdx.cn/",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()


def _split_title_parts(titles_info):
    """
    Populate ``title_catalyst`` / ``title_alt`` on *titles_info* in place.

    ``judge`` holds every text line around the title.  When it has the same
    length as ``title`` there is no surrounding text and both parts are
    empty; otherwise the judge entries before the first title line become
    the catalyst (kicker) and the entries after the last title line become
    the alternate (sub) title.
    """
    if len(titles_info["title"]) == len(titles_info["judge"]):
        titles_info["title_catalyst"] = ''
        titles_info["title_alt"] = ''
    else:
        tj = titles_info["judge"]
        titles_info["title_catalyst"] = tj[:tj.index(titles_info["title"][0])]
        titles_info["title_alt"] = tj[tj.index(titles_info["title"][-1]) + 1:]


@router.post("/step4_2/parse_article_detail")
async def parse_article_detail_2(input: InputInfoModel[XinHuaMeiRiDianXunParse4]):
    """
    Parse an article-detail page (legacy table layout) into the canonical
    record dict.

    The legacy pages come in several nesting variants, so the selector sets
    below are tried in order until one yields a non-empty title; the final
    variant is used as-is even when its title is empty.  The final variant
    has no ``judge`` selector, so title parts cannot be derived and are
    left empty.
    :param input: request payload with html, article metadata (jsondicts),
        download date, rawid and batch
    :return: serialized ReturnInfo dict with the record under ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch

    selector_variants = [
        {
            'bzinfo': {
                'parent': '#contenttext',  # parse root
                'children': {
                    "title": 'table tr td div strong font::text',
                    "judge": 'table tr td div font::text',
                    'p_info': 'font[face="Arial"]>div *::text',  # body text
                }
            }
        },
        {
            'bzinfo': {
                'parent': '#contenttext',  # parse root
                'children': {
                    "title": 'table tr td div font strong::text',
                    "judge": 'table tr td div font *::text',
                    'p_info': 'font[face="Arial"]>div *::text',  # body text
                }
            }
        },
        {
            'bzinfo': {
                'parent': '#contenttext',  # parse root
                'children': {
                    "title": 'table tr td div font *::text',
                    'p_info': 'font[face="Arial"]>div *::text',  # body text
                }
            }
        },
    ]

    new_dict = None
    titles_info = None
    css_dicts = None
    for css_dicts in selector_variants:
        mc = MParsel(html=html)
        new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
        titles_info = new_dict["bzinfo"]["children"][0]
        # No author is extracted from this layout; the field stays empty.
        titles_info["authors"] = ''
        if len(titles_info["title"]) != 0:
            break

    if 'judge' in css_dicts['bzinfo']['children']:
        _split_title_parts(titles_info)
    else:
        # Last-resort variant: no surrounding text available to split.
        titles_info["title_catalyst"] = ''
        titles_info["title_alt"] = ''

    BaseDicts.get_recursive_dict(new_dict, None, call_back)

    new_one_dict = new_dict["bzinfo"]["children"][0]

    dic = {
        "lngid": BaseLngid().GetLngid("00356", rawid),
        "sub_db_id": "00356",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "新华每日电讯",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': new_one_dict["title_catalyst"],
        'title_alt': new_one_dict["title_alt"],
        "author": new_one_dict["authors"],
        "abstract": new_one_dict["p_info"],
        'pub_year': getdicts["years"],
        'pub_date': getdicts["periodDate"],
        'meeting_counts': getdicts["page"],
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'mrdxpaper',
        "provider": 'XINHUANET',
        "product": 'MRDX',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://www.mrdx.cn/",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()
