import asyncio
import random
import re
import time

from fastapi import APIRouter

from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.core.mlamada import bools_string

from apps.allsubdb.paper_fazhiribao.models import PaperFaZhiRiBaoGetDay, headers, \
    PaperFaZhiRiBaoDown
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo, FAILED
from apps.crawler_platform.util.requestapihelper import RequestApiHelper

# All endpoints below are registered on this router; the custom route class
# attaches request context handling to every handler.
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/get_day")
async def get_day(input: InputInfoModel[PaperFaZhiRiBaoGetDay]):
    """Fetch the full list of available paper dates (marks: allpaperdate).

    Requests ``datelist.php`` directly and returns the raw HTML; parsing of
    the date list happens downstream.

    :param input: carries ``proxy`` (proxy to route the request through) and
        ``data.marks`` (marker string the marks middleware must find in the
        response body for the fetch to count as successful).
    :return: ``ReturnInfo`` dict with status / msg_code / msg and
        ``data["html"]`` holding the fetched page.
    """
    # NOTE(review): renamed from ``get_page_link`` — that name is reused by the
    # step2 handler, which shadowed this one at module level.
    proxy = input.proxy
    marks = input.data.marks
    return_info = ReturnInfo()
    url = r"http://epaper.legaldaily.com.cn/fzrb/content/datelist.php"
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        # Validate the HTTP status first, then require ``marks`` in the body.
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares],
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


@router.post("/step2/get_page_link")
async def get_page_link(input: InputInfoModel[PaperFaZhiRiBaoDown]):
    """Given a paper's front-page URL, fetch the HTML that lists every
    section's title and link for that issue (marks: atitle).

    :param input: carries ``proxy``, ``data.url`` (front-page URL to fetch)
        and ``data.marks`` (marker the marks middleware must find in the
        response). ``data.periodDate`` (e.g. 20200805) is accepted by the
        model but not used here.
    :return: ``ReturnInfo`` dict with status / msg_code / msg and
        ``data["html"]`` holding the fetched page.
    """
    # Random throttle between requests; asyncio.sleep keeps the event loop
    # free (time.sleep here would block every concurrent request).
    await asyncio.sleep(random.randint(1, 3))
    proxy = input.proxy
    marks = input.data.marks
    return_info = ReturnInfo()
    url = input.data.url
    this_header = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        # Validate the HTTP status first, then require ``marks`` in the body.
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares],
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


@router.post("/step3/get_article_link")
async def get_article_link(input: InputInfoModel[PaperFaZhiRiBaoDown]):
    """Fetch a section page's HTML, presumably to extract its article links
    (marks: atitle). The original docstring was copied from step2 — confirm
    the exact contract against the caller.

    :param input: carries ``proxy``, ``data.url`` (section page URL) and
        ``data.marks`` (marker the marks middleware must find in the
        response). ``data.periodDate`` is accepted by the model but not
        used here.
    :return: ``ReturnInfo`` dict with status / msg_code / msg and
        ``data["html"]`` holding the fetched page.
    """
    # Random throttle between requests; asyncio.sleep keeps the event loop
    # free (time.sleep here would block every concurrent request).
    await asyncio.sleep(random.randint(1, 3))
    proxy = input.proxy
    marks = input.data.marks
    return_info = ReturnInfo()
    url = input.data.url
    this_header = headers.copy()
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        # Validate the HTTP status first, then require ``marks`` in the body.
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares],
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()


#
#
@router.post("/step4/article_detail")
async def article_detail(input: InputInfoModel[PaperFaZhiRiBaoDown]):
    """Fetch one article's detail page HTML (marks: text_c).

    :param input: carries ``proxy``, ``data.url`` (article link, e.g.
        http://epaper.gmw.cn/gmrb/html/2020-01/08/nw.D110000gmrb_20200108_3-09.htm)
        and ``data.marks`` (marker the marks middleware must find in the
        response).
    :return: ``ReturnInfo`` dict with status / msg_code / msg and
        ``data["html"]`` holding the fetched page.
    """
    # Random throttle between requests; asyncio.sleep keeps the event loop
    # free (time.sleep here would block every concurrent request).
    await asyncio.sleep(random.randint(1, 3))
    return_info = ReturnInfo()
    proxy = input.proxy
    this_header = headers.copy()
    url = input.data.url
    marks = input.data.marks
    rrq = RequestApiHelper.get_rrq()
    kwargs = {
        "rrq": rrq,
        "header": this_header,
        "url": url,
        "timeout": 30,
        "marks": marks,
        "proxy": proxy,
        "moths": MRequest.GET,
        # Validate the HTTP status first, then require ``marks`` in the body.
        "middler_list": [rrq.status_code_middlerwares, rrq.marks_middlerwares],
    }
    bools, dicts = await RequestApiHelper.etl_remote_meta(**kwargs)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    return_info.data = {"html": rrq.html}
    return return_info.todict()
