import ast
import re

from fastapi import APIRouter
from parsel import Selector
from re_common.baselibrary.tools.myparsel import MParsel
from re_common.baselibrary.utils.basedict import BaseDicts
from re_common.baselibrary.utils.core.mlamada import bools_string
from re_common.vip.baseencodeid import BaseLngid

from apps.allsubdb.paper_nongminribao.models import PaperNongMinRiBaoParseDays, \
    NongMinRiBaoParse2, NongMinRiBaoParse3, NongMinRiBaoParse4
from apps.core.callback import default_call_back
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo

router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/parse_days")
async def parse_days(input: InputInfoModel[PaperNongMinRiBaoParseDays]):
    """
    Parse the issue-date list page.

    The downloaded snippet looks like ``var datelist = [...];``; strip the
    JS assignment wrapper and return the bare list of dates.

    :param input: request body whose ``data.html`` holds the raw JS snippet
    :return: serialized ReturnInfo dict with the parsed date list in ``data``
    """
    return_info = ReturnInfo()
    html = input.data.html
    # Remove the JS tokens so only the list literal remains.
    html_content = html.replace("var", "").replace("datelist", "").replace("=", "").replace(";", "")
    # SECURITY FIX: the original used eval() on downloaded content, which would
    # execute arbitrary expressions from the remote site. ast.literal_eval only
    # accepts Python literals, so a malicious payload raises ValueError instead
    # of running code; for the expected list payload the result is identical.
    daylist = ast.literal_eval(html_content.strip())
    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = daylist
    return return_info.todict()


@router.post("/step2/parse_page")
async def parse_page(input: InputInfoModel[NongMinRiBaoParse2]):
    """
    Parse a daily index page into its list of newspaper sections.

    :param input: request body carrying the raw html and the issue date
    :return: serialized ReturnInfo dict; ``data`` holds one entry per section
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {
        'bzinfo': {
            'parent': '#listpage .bancititle a',  # top-level nodes to iterate
            'children': {
                'versions_title': 'span::text',  # section title text
                'page_url': 'a::attr(href)',  # link to the single section page
            }
        }
    }

    page_marker = re.compile(r"第[0-9]*版：")
    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=css_dicts)
    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    base_url = "http://szb.farmer.com.cn/{}/{}".format(periodDate[0:4], periodDate)
    for item in parsed["bzinfo"]["children"]:
        raw_title = item["versions_title"]
        # Title text with the leading "第NNN版：" marker removed.
        plain_title = page_marker.sub("", raw_title).strip()
        # Digits of the page number, e.g. "第1版：" -> "1".
        page = page_marker.findall(raw_title)[0].replace("第", '').replace("版", '').replace("：", '')
        item["versions_title"] = "({}):".format(page.zfill(3)) + plain_title
        item["days"] = periodDate
        href = item['page_url']
        item["cid"] = href.split("/")[1].replace("_", '')
        item["page"] = page
        # e.g. "http://szb.farmer.com.cn/2020/20201201/20201201_001/20201201_001.html"
        item["versions_url"] = base_url + href.strip(".")

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = parsed["bzinfo"]["children"]
    return return_info.todict()


@router.post("/step3/parse_page")
async def parse_article(input: InputInfoModel[NongMinRiBaoParse3]):
    """
    Parse one section page into its list of article links.

    :param input: request body carrying the raw html and the issue date
    :return: serialized ReturnInfo dict; ``data`` holds one entry per article
    """
    return_info = ReturnInfo()
    html = input.data.html
    periodDate = input.data.periodDate
    css_dicts = {
        'bzinfo': {
            'parent': 'map[name="map_of_yyb"] area[id^="items"]',  # image-map hotspots
            'children': {
                'title': 'area::attr(name)',  # article title
                'article_url_format': 'area::attr(href)',  # relative article url
            }
        }
    }

    parser = MParsel(html=html)
    parsed = parser.css_parsel(sel=parser.sel, css_selector=css_dicts)
    BaseDicts.get_recursive_dict(parsed, None, default_call_back)

    url_prefix = "http://szb.farmer.com.cn/{}/{}/".format(periodDate[0:4], periodDate)
    for item in parsed["bzinfo"]["children"]:
        relative = item["article_url_format"].replace("../", '')
        # rawid: last path segment with the extension, stray quote and "_" removed.
        item["rawid"] = relative.split("/")[-1].replace(".htm\'", '').replace(".htm", '').replace("_", '')
        item["days"] = periodDate
        item["article_url"] = url_prefix + relative

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = parsed["bzinfo"]["children"]
    return return_info.todict()


def call_back(key, value):
    """
    Default parse callback: normalize a scraped value.

    List values are cleaned element by element (carriage returns, newlines
    and non-breaking spaces removed, surrounding whitespace stripped); each
    non-empty piece is kept with a trailing newline. Scalar values pass
    through unchanged. A trailing ";" is stripped from the final result.

    :param key: field name (unused; kept for the callback signature)
    :param value: scraped value, either a list of strings or a scalar string
    :return: the cleaned string
    """
    if isinstance(value, list):
        pieces = []
        for raw in value:
            cleaned = raw.replace("\r", "").replace("\n", "").replace(u"\xa0", "").strip()
            if cleaned != "":
                pieces.append(cleaned + "\n")
        result = "".join(pieces)
    else:
        result = value
    return result.rstrip(";")


@router.post("/step4/parse_article_detail")
async def parse_article_one(input: InputInfoModel[NongMinRiBaoParse4]):
    """
    Parse a single article page into the final record dict.

    :param input: request body carrying the article html, the caller-supplied
        metadata dict (``jsondicts``), the download date, the raw article id
        and the batch tag
    :return: serialized ReturnInfo dict; ``data["result"]`` is the record
    """
    return_info = ReturnInfo()
    html = input.data.html
    getdicts = input.data.jsondicts
    downdate = input.data.downdate
    rawid = input.data.rawid
    batch = input.data.batch
    css_dicts = {
        'bzinfo': {
            'parent': 'td[rowspan="4"] table tr div table[style]',  # article container
            'children': {
                # "infos": 'table[style] table td[class^="font"],table[style] table td[class^="font"]>h5',
                "title": '.font01>h1::text,.font01::text',
                'p_info': '.font6 span p *::text',  # article body text
                "parent": 'table[style] table td[class^="font"]',
                'children': {
                    "infos": "td::text,td>*::text"
                }

            }
        }
    }



    mc = MParsel(html=html)

    new_dict = mc.css_parsel(sel=mc.sel, css_selector=css_dicts)
    # Collect the "infos" cell texts BEFORE get_recursive_dict flattens the
    # structure, so that empty cells keep their position in the list (the
    # field mapping below is positional).
    infos_list = list()
    for i in new_dict["bzinfo"]["children"][0]["children"]:
        if len(i["infos"]) == 0:
            infos_list.append('')
        else:
            infos_list.append('\n'.join(i["infos"]))

    BaseDicts.get_recursive_dict(new_dict, None, call_back)
    new_one_dict = new_dict["bzinfo"]["children"][0]

    # NOTE(review): the positional mapping below (infos_list[0] = catalyst
    # title, [2] = alternate title, [3] = author) is assumed from the page
    # layout — confirm against a live article page. Fewer than four matching
    # cells would raise IndexError here.
    dic = {
        "lngid": BaseLngid().GetLngid("00376", rawid),
        "sub_db_id": "00376",
        "batch": batch,
        "rawid": rawid,
        "source_type": '11',
        "journal_name": "农民日报",
        'provider_url': getdicts["url"],
        'title': new_one_dict["title"],
        'title_catalyst': infos_list[0],
        'title_alt': infos_list[2],
        "author": infos_list[3],
        "abstract": new_one_dict["p_info"],
        'pub_year': getdicts["years"],
        'pub_date': getdicts["periodDate"],
        'meeting_counts': getdicts["page"],
        'index': getdicts["index"],
        "sub_db": 'PAPER',
        "zt_provider": 'farmerpaper',
        "provider": 'FARMERCN',
        "product": 'SZBFARMER',
        "country": "CN",
        "language": "ZH",
        "web_site": "http://szb.farmer.com.cn/",
        'down_date': downdate
    }

    return_info.status = bools_string(True)
    return_info.msg_code = 200
    return_info.msg = ""
    return_info.data = {"result": dic}
    return return_info.todict()
