import re

from fastapi import APIRouter
from re_common.baselibrary.tools.myparsel import MParsel
from re_common.baselibrary.utils.basedict import BaseDicts

from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.vip.baseencodeid import BaseLngid

from apps.allsubdb.paper_fazhiribao.models import PaperFaZhiRiBaoParseDays, \
    FaZhiRiBaoParse2, FaZhiRiBaoParse3, FaZhiRiBaoParse4

from apps.core.callback import default_call_back
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo
from ast import literal_eval

router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/parse_days")
async def parse_days(input: InputInfoModel[PaperFaZhiRiBaoParseDays]):
    """
    Parse the JS snippet listing all publication dates into a Python list.

    The incoming HTML is expected to be a JavaScript assignment of the form
    ``var allpaperdate=[...];`` — assumption based on the string stripped
    below; confirm against the downloader that feeds this endpoint.

    :param input: wrapper whose ``data.html`` holds the raw JS text
    :return: standard ReturnInfo dict with the date list in ``data``
    """
    payload = input.data.html.replace("var allpaperdate=", '').strip().rstrip(";")
    # literal_eval safely turns the JS array literal into a Python list
    dates = literal_eval(payload)

    info = ReturnInfo()
    info.status = bools_string(True)
    info.msg_code = 200
    info.msg = ""
    info.data = dates
    return info.todict()


@router.post("/step2/parse_page")
async def parse_article(input: InputInfoModel[FaZhiRiBaoParse2]):
    """
    Parse an issue's front index page into per-page (版面) entries.

    For each page link it derives a zero-padded page number, a normalized
    version title "(NN)title", a cid (periodDate + page code) and the
    absolute page URL.

    NOTE(review): this function name shadows the step3 handler below; the
    routes still register correctly, but distinct names would avoid
    duplicate-operation warnings.

    :param input: wrapper with ``data.html`` (index page HTML) and
                  ``data.periodDate`` (issue date string)
    :return: standard ReturnInfo dict with the page entries in ``data``
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {

        'bzinfo': {
            'parent': 'td[valign=top] .atitle',  # parse root
            'children': {
                'versions_title': 'a[href^="Page"]::text',  # page (版面) name
                'page_url': 'a[href^="Page"]::attr(href)',  # single-page node

            }
        }
    }

    # leading digits of the title, e.g. "01..." -> "01"; matches the empty
    # string when the title does not start with digits (page becomes "00")
    rule = re.compile(r"^[0-9]*")
    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(new_dict, None, default_call_back)

    # hoist the loop-invariant URL prefix
    base_url = "http://epaper.legaldaily.com.cn/fzrb/content/{}/".format(periodDate)
    deal_list = list()
    for dic_1 in new_dict["bzinfo"]["children"]:  # entries of the outer dict's list
        banmiantitle = dic_1["versions_title"]
        if len(banmiantitle) != 0:
            versions_title_forward = rule.sub("", banmiantitle).strip()
            page = rule.findall(banmiantitle)[0].replace("第", '').replace("版", '')
            versions_title = "({})".format(page.zfill(2)) + versions_title_forward
            # normalized page title
            dic_1["versions_title"] = versions_title
            dic_1["days"] = periodDate
            url_last = dic_1['page_url'].replace("Page", '').replace("TB.htm", '')
            cid = periodDate + url_last
            dic_1["cid"] = cid
            dic_1["page"] = page
            dic_1["versions_url"] = base_url + dic_1['page_url']
            deal_list.append(dic_1)
    new_dict["bzinfo"]["children"] = deal_list
    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = new_dict["bzinfo"]["children"]
    return return_info.todict()


@router.post("/step3/parse_page")
async def parse_article(input: InputInfoModel[FaZhiRiBaoParse3]):
    """
    Parse a single page (版面) HTML into article entries.

    Each kept entry gains a ``rawid`` (periodDate + stripped article code),
    an absolute ``article_url`` and the issue date in ``days``.

    :param input: wrapper with ``data.html`` and ``data.periodDate``
    :return: standard ReturnInfo dict with the article entries in ``data``
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {

        'bzinfo': {
            'parent': 'td[valign=top] .atitle',  # parse root
            'children': {
                'title': 'a[href^="Articel"]::text',  # article title
                'article_url_format': 'a[href^="Articel"]::attr(href)',  # article node

            }
        }
    }

    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=css_dicts)

    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    base_url = "http://epaper.legaldaily.com.cn/fzrb/content/{}/".format(periodDate)
    kept = list()
    for entry in parsed["bzinfo"]["children"]:  # entries of the outer dict's list
        if len(entry["title"]) == 0:
            continue
        code = entry['article_url_format'].replace("Articel", '')
        code = code.replace(".htm", '').replace("GN", '')
        entry["rawid"] = periodDate + code
        entry["article_url"] = base_url + entry['article_url_format']
        entry["days"] = periodDate
        kept.append(entry)
    parsed["bzinfo"]["children"] = kept

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = parsed["bzinfo"]["children"]
    return return_info.todict()


def call_back(key, value):
    """
    Parse callback used by BaseDicts.get_recursive_dict for step4.

    For a list value: each element is stripped of CR/LF/NBSP and
    surrounding whitespace, empty results are dropped, and the survivors
    are joined with a trailing newline after each. For any other value it
    is passed through unchanged. In both cases trailing ``;`` characters
    are stripped from the final string.

    :param key: dict key being visited (unused, required by the callback API)
    :param value: list of text fragments or a plain string
    :return: cleaned string
    """
    if isinstance(value, list):
        cleaned = [
            val.replace("\r", "").replace("\n", "").replace(u"\xa0", "").strip()
            for val in value
        ]
        # join is linear; the original += loop was quadratic
        result = "".join(val + "\n" for val in cleaned if val != "")
    else:
        result = value
    return result.rstrip(";")


def judgement(strs_list, title):
    """
    Return the index of the first element of *strs_list* equal to *title*.

    :param strs_list: iterable of strings to search
    :param title: value to look for
    :return: zero-based index of the first match, or None when absent
    """
    for index, item in enumerate(strs_list):
        if item == title:
            return index
    # explicit None: the original fell through implicitly on no match
    return None


def _attach_title_context(titles_info):
    """
    Split the 'judge' text list around the title lines.

    Text fragments before the first title line become ``title_catalyst``
    (kicker/引题) and fragments after the last title line become
    ``title_alt`` (subtitle/副题). Raises ValueError/IndexError when the
    title fragments are not present in ``judge`` — same failure mode as
    the original inline code.
    """
    tj = titles_info["judge"]
    titles = titles_info["title"]
    titles_info["title_catalyst"] = tj[:tj.index(titles[0])]
    titles_info["title_alt"] = tj[tj.index(titles[-1]) + 1:]


@router.post("/step4/parse_article_detail")
async def parse_article_one(input: InputInfoModel[FaZhiRiBaoParse4]):
    """
    Parse one article-detail page into the final record dict.

    Tries the primary CSS layout first; when its ``judge`` fields come
    back empty, falls back to an alternate layout (the paper stores the
    title block in different places for the same structure). When even
    the fallback finds no title, returns success with an empty result.

    :param input: wrapper with ``data.html``, ``data.jsondicts`` (context
                  such as url/years/periodDate/page/index),
                  ``data.downdate``, ``data.rawid`` and ``data.batch``
    :return: standard ReturnInfo dict with the record in ``data["result"]``
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch
    css_dicts = {
        'bzinfo': {
            'parent': 'td[valign=top]:not([width])',  # parse root
            'children': {
                "title": "span[style='display:'] tr[valign=top][style='display:'] td span strong *::text",
                "judge": "span[style='display:'] tr[valign=top][style='display:'] td span *::text",
                'p_info': '#oldcontenttext *::text',  # body text

            }
        }
    }

    mc = MParsel(html=html)
    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    titles_info = new_dict["bzinfo"]["children"][0]
    # Same page structure, but the title block may live in another spot.
    if len(titles_info["judge"]) == 0:
        css_dicts = {
            'bzinfo': {
                'parent': '#contenttext',  # parse root
                'children': {
                    "title": "tbody tr div strong font::text",
                    "judge": "tbody tr div font::text",
                    'p_info': 'font[face="Arial"]::text,font>div::text,font>div>div::text',  # body text

                }
            }
        }

        mc = MParsel(html=html)
        new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
        titles_info = new_dict["bzinfo"]["children"][0]
        if len(titles_info["title"]) == 0:
            # No title in either layout: report success with empty result.
            return_info.status = bools_string(True)
            return_info.msg_code = 200
            return_info.msg = "no title"
            return_info.data = {"result": ''}
            return return_info.todict()

    # Shared tail for both layouts (was duplicated inline in each branch).
    _attach_title_context(titles_info)
    BaseDicts.get_recursive_dict(new_dict, None, call_back)

    new_one_dict = new_dict["bzinfo"]["children"][0]
    dic = {
        "lngid": BaseLngid().GetLngid("00350", rawid),
        "sub_db_id": "00350",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "法制日报",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': new_one_dict["title_catalyst"],
        'title_alt': new_one_dict["title_alt"],
        "author": '',
        "abstract": new_one_dict["p_info"],
        'pub_year': getdicts["years"],
        'pub_date': getdicts["periodDate"],
        'meeting_counts': getdicts["page"],
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'fzrbpaper',
        "provider": 'LEGALDAILY',
        "product": 'LEGALDAILYFZRB',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://www.legaldaily.com.cn/",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()
