from fastapi import APIRouter
from re_common.baselibrary.tools.all_requests.aiohttp_request import AioHttpRequest
from re_common.baselibrary.tools.all_requests.mrequest import MRequest
from re_common.baselibrary.utils.core.mlamada import bools_string

from apps.allsubdb.paper_sciencenet.models import headers, DownMonthsModel
from apps.core.global_model import IdMarksModel
from apps.core.m_route import ContextIncludedRoute
from apps.core.return_info import InputInfoModel, ReturnInfo

# All routes in this module use ContextIncludedRoute as their route class
# (custom request/context handling provided by apps.core.m_route).
router = APIRouter(route_class=ContextIncludedRoute)


@router.post("/step1/get_year_month")
async def get_year_month(input: InputInfoModel[DownMonthsModel]):
    """
    Fetch the calendar for a given year/month, or the data for a single day.

    The calendar tells us which days have data, so in theory the historical
    data only needs one full scan. Judging from the page, months cannot be
    jumped over — only the previous/next month is reachable — and each step
    depends on an internal cookie whose value changes every request.

    This endpoint can also fetch a single day's data: only the
    __EVENTARGUMENT parameter differs (month requests prefix it with a "v").

    marks: cellspacing
    :param input: proxy settings plus the ASP.NET postback fields
                  (target / targument / viewstate) and the marks string.
    :return: ReturnInfo dict with the fetched html and the final url.
    """
    return_info = ReturnInfo()
    proxy = input.proxy
    target = input.data.target
    targument = input.data.targument
    viewstate = input.data.viewstate
    marks = input.data.marks
    # ASP.NET WebForms postback payload.
    form = {
        "__EVENTTARGET": target,
        "__EVENTARGUMENT": targument,
        "__VIEWSTATE": viewstate
    }
    # Any valid journal url works as the postback target.
    url = "http://paper.sciencenet.cn/dz/dzzz_1.aspx?dzsbqkid=35292"

    this_header = headers.copy()
    this_header["Content-Type"] = "application/x-www-form-urlencoded"
    this_header["Origin"] = "http://paper.sciencenet.cn"
    this_header["Referer"] = "http://paper.sciencenet.cn/dz/dzzz_1.aspx?dzsbqkid=35292"
    rrq = AioHttpRequest()
    rrq.set_url(url).set_header(this_header) \
        .set_timeout(30).set_marks(marks) \
        .set_proxy(proxy).set_data(form) \
        .set_middler_list(
        [rrq.status_code_middlerwares, rrq.marks_middlerwares, rrq.end_middlerwares])
    bools, dicts = await rrq.run(MRequest.POST)
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    return_info.msg = dicts["msg"]
    # rrq.resp may be None when the request never completed; fall back to the
    # request url instead of raising AttributeError while building the reply.
    final_url = str(rrq.resp.url) if rrq.resp is not None else url
    return_info.data = {"html": rrq.html, "url": final_url}
    return return_info.todict()


@router.post("/step2/get_one_days")
async def get_page_link(input: InputInfoModel[IdMarksModel]):
    """
    Download the data for a single day. Because the calendar and the daily
    data share one page, downloading any day that has data also yields the
    current month's calendar — which makes this a good initialisation entry
    point.

    If the id is accurate the data can be fetched directly with a GET;
    otherwise a request made via step1 gets redirected.
    Example: http://paper.sciencenet.cn/dz/dzzz_1.aspx?dzsbqkid=35292
    marks: sec_menu
    :param input:
    :return:
    """
    result = ReturnInfo()
    # A url for any date always lands on the latest year/month.
    journal_id = input.data.id
    page_url = r"http://paper.sciencenet.cn/dz/dzzz_1.aspx?dzsbqkid={}".format(journal_id)
    request = AioHttpRequest()
    middlers = [
        request.status_code_middlerwares,
        request.marks_middlerwares,
        request.end_middlerwares,
    ]
    request.set_url(page_url) \
        .set_header(headers.copy()) \
        .set_timeout(30) \
        .set_marks(input.data.marks) \
        .set_proxy(input.proxy) \
        .set_middler_list(middlers)
    ok, info = await request.run(MRequest.GET)
    result.status = bools_string(ok)
    result.msg_code = info["code"]
    result.msg = info["msg"]
    result.data = {"html": request.html}
    return result.todict()


@router.post("/step4/down_article")
async def down_article(input: InputInfoModel[IdMarksModel]):
    """
    Download the article content.
    marks: content1
    :param input: proxy plus the article's relative url carried in data.id.
    :return: ReturnInfo dict; data["stat"] is 1 on success, -1 when the page
             lacks the expected feature markers or no html came back.
    """
    # NOTE: renamed from `get_year_month`, which duplicated the step1
    # handler's name and shadowed it at module level.
    return_info = ReturnInfo()
    proxy = input.proxy
    # Here `data.id` carries the article's relative path, not a numeric id.
    url = input.data.id
    full_url = "http://paper.sciencenet.cn" + url

    this_header = headers.copy()
    rrq = AioHttpRequest()
    rrq.set_url(full_url).set_header(this_header) \
        .set_timeout(30) \
        .set_proxy(proxy).set_resp_errors("ignore") \
        .set_middler_list(
        [rrq.status_code_middlerwares])
    bools, dicts = await rrq.run(MRequest.GET)
    html_text = rrq.html
    return_info.status = bools_string(bools)
    return_info.msg_code = dicts["code"]
    if html_text is None:
        # No body came back at all: report failure with an empty html payload.
        return_info.msg = dicts["msg"]
        return_info.data = {"stat": -1, "url": full_url, "html": ''}
    elif html_text.find('blockquote') < 0 and html_text.find("content1") < 0:
        # Neither feature marker is present -> not a real article page.
        return_info.msg = "查询网页特征值错误"
        return_info.data = {"stat": -1, "url": full_url, "html": html_text}
    else:
        return_info.msg = dicts["msg"]
        return_info.data = {"stat": 1, "url": full_url, "html": html_text}
    return return_info.todict()
