import re
import requests
import time

from fastapi import APIRouter
from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.core.mlamada import bools_string

from apps.allsubdb.paper_beijingribao.models import PaperBeiJingRiBaoForm, headers, PaperBeiJingRiBaoArticle, \
    PaperBeiJingRiBaoGetDay
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo
from apps.crawler_platform.util.requestapihelper import RequestApiHelper

router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/get_day")
@router.post("/step1/get_day")
async def get_day(input: InputInfoModel[PaperBeiJingRiBaoGetDay]):
    """Return the page listing every downloadable date in a newspaper month.

    marks: datelist

    :param proxy: proxy to route the request through
    :param periodMonth: newspaper month, e.g. "202008"
    :return: ReturnInfo dict; the fetched HTML is under data["html"]
    """
    proxy = input.proxy
    periodMonth = input.data.periodMonth
    marks = input.data.marks
    return_info = ReturnInfo()
    # Cache-buster for the "_=" query param: the fractional-second digits of
    # the current time with a fixed "634" suffix, mimicking the site's own
    # timestamp-like value. The server presumably ignores the exact value.
    timestr = str(time.time()).split('.')[-1] + '634'
    url = f"https://bjrbdzb.bjd.com.cn/bjrb/period/{periodMonth}/period.js?_t=0.37078366827687925&_={timestr}"
    # Purpose-built headers (instead of the shared `headers` template) that
    # include the HTTP/2-style pseudo-header names the site expects.
    this_header = {
        "authority": "bjrbdzb.bjd.com.cn",
        "method": "GET",
        "path": f"/bjrb/period/{periodMonth}/period.js?_t=0.37078366827687925&_={timestr}",
        "scheme": "https",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "accept-encoding": "gzip, deflate, br",
        "accept-language": "zh-CN,zh;q=0.9",
        "sec-ch-ua": '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
        "sec-ch-ua-mobile": "?0",
        "sec-fetch-dest": "document",
        "sec-fetch-mode": "navigate",
        "sec-fetch-site": "none",
        "sec-fetch-user": "?1",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36",
    }
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        # NOTE(review): unlike the other endpoints in this module, no
        # end_middlerwares here — matches the original; confirm intentional.
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


@router.post("/step1/get_day_article_link")
@router.post("/step1/get_day_article_link")
async def get_day_article_link(input: InputInfoModel[PaperBeiJingRiBaoForm]):
    """Return the page listing every section title/link for a newspaper date.

    marks: nav-panel-primary

    Renamed from ``get_page_link``: the module previously defined two
    handlers with that name, so this one shadowed the /step1/get_day
    handler's module-level name and produced colliding operation ids.

    :param proxy: proxy to route the request through
    :param periodDate: newspaper date, e.g. "20200805"
    :return: ReturnInfo dict; the fetched HTML is under data["html"]
    """
    proxy = input.proxy
    periodDate = input.data.periodDate
    marks = input.data.marks
    return_info = ReturnInfo()
    # URL layout: .../mobile/<year>/<date>/<date>_m.html
    url = r"https://bjrbdzb.bjd.com.cn/bjrb/mobile/{}/{}/{}_m.html".format(periodDate[0:4], periodDate,
                                                                           periodDate)
    this_header = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares, rrq.end_middlerwares]
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


@router.post("/step4/article_detail")
@router.post("/step4/article_detail")
async def article_detail(input: InputInfoModel[PaperBeiJingRiBaoArticle]):
    """Fetch the detail-page HTML for a single article.

    marks: content my-gallery

    :param proxy: proxy to route the request through
    :param periodDate: newspaper date
    :param title: article title
    :param url: article link, e.g.
        https://bjrbdzb.bjd.com.cn/bjrb/mobile/2020/20201027/20201027_001/content_20201027_001_4.htm#page0
    :return: ReturnInfo dict; the fetched HTML is under data["html"]
    """
    result = ReturnInfo()
    request_helper = RequestApiHelper.get_rrq()
    # Delegate the actual fetch to the shared helper; status-code, marks and
    # end middlewares validate the response before we report success.
    ok, meta = await RequestApiHelper.etl_remote_meta(
        rrq=request_helper,
        header=headers.copy(),
        url=input.data.url,
        timeout=30,
        marks=input.data.marks,
        proxy=input.proxy,
        moths=MRequest.GET,
        middler_list=[
            request_helper.status_code_middlerwares,
            request_helper.marks_middlerwares,
            request_helper.end_middlerwares,
        ],
    )
    result.status = bools_string(ok)
    result.msg_code = meta["code"]
    result.msg = meta["msg"]
    result.data = {"html": request_helper.html}
    return result.todict()
