import re

from fastapi import APIRouter
from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.core.mlamada import bools_string

from apps.allsubdb.paper_wenhuibao.models import PaperWenHuiBaoGetDay, headers, \
    PaperWenHuiBaoDown
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo, FAILED
from apps.crawler_platform.util.requestapihelper import RequestApiHelper

# Sub-router for the 文汇报 (Wenhui Daily) crawler steps; ContextIncludedRoute
# is a project route class (presumably injects request context — see apps.core.m_route).
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/get_day")
async def get_day(input: InputInfoModel[PaperWenHuiBaoGetDay]):
    """
    Step 1: fetch the month index (period.xml) listing every downloadable
    publication date for the given month.  marks: periodlist

    :param input: request wrapper carrying
        - proxy: proxy to route the request through
        - data.periodMonth: target month, e.g. "202008"
        - data.marks: feature string the response body must contain
    :return: ReturnInfo dict; the raw XML is in data["html"].
    """
    # NOTE(review): renamed from a copy-pasted ``get_page_link`` — the old name
    # collided with (and was shadowed by) the step2 handler of the same name.
    proxy = input.proxy
    period_month = input.data.periodMonth
    years = period_month[0:4]   # e.g. "2020"
    months = period_month[-2:]  # e.g. "08"
    marks = input.data.marks
    return_info = ReturnInfo()
    url = r"http://dzb.whb.cn/html/{}-{}/period.xml".format(years, months)
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


@router.post("/step2/get_page_link")
async def get_page_link(input: InputInfoModel[PaperWenHuiBaoDown]):
    """
    Step 2: fetch the paper's front page and return the HTML that contains
    every page title and link.  marks: pageTitle, navigation_other

    :param input: request wrapper carrying
        - proxy: proxy to route the request through
        - data.url: front-page URL to download
    :return: ReturnInfo dict with the page HTML in data["html"];
        status is FAILED when neither feature marker appears in the body.
    """
    proxy = input.proxy
    return_info = ReturnInfo()
    url = input.data.url
    this_header = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "proxy": proxy,
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    html_text = rrq.html
    return_info.msg_code = dicts["code"]
    # A valid page carries at least one of the two markers; if both are
    # missing, we very likely fetched an error/placeholder page.
    if html_text.find('navigation_other') < 0 and html_text.find("pageTitle") < 0:
        return_info.status = FAILED
        return_info.msg = "查询网页特征值错误"
        return return_info.todict()
    return_info.status = bools_string(bools)
    return_info.msg = dicts["msg"]
    return_info.data = {"html": html_text}
    return return_info.todict()


@router.post("/step3/get_article_link")
async def get_article_link(input: InputInfoModel[PaperWenHuiBaoDown]):
    """
    Step 3: fetch one page of the paper and return the HTML that contains
    the article titles and links for that page.  marks: pageList

    :param input: request wrapper carrying
        - proxy: proxy to route the request through
        - data.url: page URL to download
    :return: ReturnInfo dict with the page HTML in data["html"];
        status is FAILED when neither feature marker appears in the body.
    """
    proxy = input.proxy
    return_info = ReturnInfo()
    url = input.data.url
    this_header = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "proxy": proxy,
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    html_text = rrq.html
    return_info.msg_code = dicts["code"]
    # A valid page carries at least one of the two markers; if both are
    # missing, we very likely fetched an error/placeholder page.
    if html_text.find('navigation_title') < 0 and html_text.find("List_1") < 0:
        return_info.status = FAILED
        return_info.msg = "查询网页特征值错误"
        return return_info.todict()
    return_info.status = bools_string(bools)
    return_info.msg = dicts["msg"]
    return_info.data = {"html": html_text}
    return return_info.todict()


#
#
@router.post("/step4/article_detail")
async def article_detail(input: InputInfoModel[PaperWenHuiBaoDown]):
    """
    Step 4: download a single article page and return its detail HTML.
    marks: article, text_c

    :param input: request wrapper carrying
        - proxy: proxy to route the request through
        - data.url: article URL, e.g.
          http://paper.people.com.cn/rmrb/html/2019-01/03/nw.D110000renmrb_20190103_2-07.htm
    :return: ReturnInfo dict with the article HTML in data["html"];
        status is FAILED when neither feature marker appears in the body.
    """
    return_info = ReturnInfo()
    request_headers = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    bools, dicts = await RequestApiHelper.etl_remote_meta(
        rrq=rrq,
        header=request_headers,
        url=input.data.url,
        timeout=30,
        proxy=input.proxy,
        moths=MRequest.GET,
        middler_list=[rrq.status_code_middlerwares, rrq.marks_middlerwares],
    )
    page_html = rrq.html
    # Guard: a real article page contains at least one of the two markers.
    if 'news_box' not in page_html and "textBody" not in page_html:
        return_info.msg_code = dicts["code"]
        return_info.status = FAILED
        return_info.msg = "查询网页特征值错误"
        return return_info.todict()
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": page_html}
    return return_info.todict()
