# -*- coding: utf-8 -*-
# @Time    : 2020/5/21 10:12
# @Author  :
# @File    : down_routers.py
# @Software: PyCharm
import datetime
import json
import re
import traceback
from fastapi import Query
from typing import Optional

import requests
from fastapi import APIRouter, Form
from parsel import Selector

from app.allsubdb.paper_jiefangjunbao.models import PaperJieFangJunBaoModel, UpdateHtml
from app.core.return_info import ReturnInfo, SUCCESS, FAILED, MsgCode
from app.sql_app.mmongodb import MongoDBClient

router = APIRouter()


def checkExist(obj):
    """Return True when *obj* is neither None nor empty.

    Works for any sized container (list, str, parsel SelectorList, ...).
    The ``is not None`` test must come first so ``len(None)`` is never
    evaluated.
    """
    return obj is not None and len(obj) > 0


@router.post("/get_page_link")
def get_page_link(*, proxy: str = Form(None), periodDate: str = Form(None)):
    """
    Return the title and link of every page (版面) of the paper for a date.

    :param proxy: optional proxy address, applied to both http and https
    :param periodDate: paper date formatted as YYYYMMDD, e.g. ``20200805``
    :return: ReturnInfo dict with ``periodDate``, ``count`` and ``pageLink``
    """
    rtn = ReturnInfo()
    # Guard: periodDate is sliced below, so reject missing/malformed values
    # instead of crashing with a TypeError.
    if not periodDate or len(periodDate) != 8 or not periodDate.isdigit():
        rtn.status = FAILED
        rtn.msg = "periodDate参数错误"
        return rtn.todict()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "www.81.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    page_url = r"http://www.81.cn/jfjbmap/content/{}-{}/{}/node_2.htm".format(
        periodDate[0:4], periodDate[4:6], periodDate[6:8])
    print(page_url)
    try:
        # timeout added so a dead proxy/server cannot hang the worker forever
        r = requests.get(url=page_url, headers=headers, proxies=proxies, timeout=30)
    except Exception:
        # BUG FIX: the original read r.status_code here, but ``r`` is unbound
        # when requests.get raises, turning every network error into an
        # UnboundLocalError.
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip the UTF-8 BOM if present
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    # BUG FIX: the blank-page check was placed after the feature check, which
    # made it unreachable (a blank page has no feature markers either).
    if len(htmlText) == 0:
        rtn.status = FAILED
        rtn.msg_code = MsgCode.PAGE_BLANK
        rtn.msg = "资源不存在"
        return rtn.todict()
    if htmlText.find("APP-SectionNav") < 0 and htmlText.find("con002") < 0:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    sel = Selector(text=htmlText)
    # BUG FIX: ">= 0" instead of "> 0" -- str.find returns 0 when the marker
    # sits at the very start of the document.
    if htmlText.find("APP-SectionNav") >= 0:
        # new page template
        list_a = sel.xpath("//ul[@id='APP-SectionNav']/li/a")
    else:
        # legacy page template
        list_a = sel.xpath("//div[@class='con002']/ul/li/a")
    if len(list_a) == 0:
        rtn.status = FAILED
        rtn.msg = "当前日期无版面链接"
        return rtn.todict()
    base = "http://www.81.cn/jfjbmap/content/{}-{}/{}/".format(
        periodDate[0:4], periodDate[4:6], periodDate[6:8])
    list_link = [{
        "title": a.xpath("./text()").extract()[0].strip(),
        "url": base + a.xpath("./@href").extract()[0].strip(),
    } for a in list_a]
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    # CONSISTENCY FIX: every path now returns rtn.todict(); the original
    # returned the raw ReturnInfo object on error paths only.
    rtn.data = {
        "periodDate": periodDate,
        "count": len(list_link),
        "pageLink": list_link,
    }
    return rtn.todict()


@router.post("/get_article_link")
def get_article_link(*, proxy: str = Form(None), periodDate: str = Form(...), pageTitle: str = Form(None),
                     url: str = Form(...)):
    """
    Return the title and link of every article on one page (版面).

    :param proxy: optional proxy address, applied to both http and https
    :param periodDate: paper date formatted as YYYYMMDD
    :param pageTitle: page name, echoed back in the response as ``pagename``
    :param url: page url to download
    :return: ReturnInfo dict with ``articleLink`` entries (1-based ``index``)
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "www.81.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        # timeout added so a dead proxy/server cannot hang the worker forever
        r = requests.get(url=url, headers=headers, proxies=proxies, timeout=30)
    except Exception:
        # BUG FIX: the original read r.status_code here, but ``r`` is unbound
        # when requests.get raises -> UnboundLocalError on any network error.
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip the UTF-8 BOM if present
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    if len(htmlText) == 0:
        rtn.status = FAILED
        rtn.msg_code = MsgCode.PAGE_BLANK
        rtn.msg = "资源不存在"
        return rtn.todict()
    if htmlText.find("newslist-box-inner") < 0 and htmlText.find("con001") < 0:
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    sel = Selector(text=htmlText)
    # BUG FIX: ">= 0" instead of "> 0" -- str.find returns 0 when the marker
    # sits at the very start of the document.
    if htmlText.find("newslist-box-inner") >= 0:
        # new page template
        list_a = sel.xpath("//ul[@id='APP-NewsList']/li/a")
    else:
        # legacy table-based template
        list_a = sel.xpath("//div[@class='con001']/ul/table/tr/td[2]/a")
    if len(list_a) == 0:
        rtn.status = FAILED
        rtn.msg = "当前版面无文章链接"
        return rtn.todict()
    base = "http://www.81.cn/jfjbmap/content/{}-{}/{}/".format(
        periodDate[0:4], periodDate[4:6], periodDate[6:8])
    list_link = [{
        "index": i + 1,
        "title": a.xpath("string(.)").extract()[0].strip(),
        "url": base + a.xpath("./@href").extract()[0].strip(),
    } for i, a in enumerate(list_a)]
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    # CONSISTENCY FIX: every path now returns rtn.todict(); the original
    # returned the raw ReturnInfo object on error paths only.
    rtn.data = {
        "periodDate": periodDate,
        "pagename": pageTitle,
        "count": len(list_link),
        "articleLink": list_link,
    }
    return rtn.todict()


@router.post("/article_detail")
def article_detail(*, proxy: str = Form(None), periodDate: str = Form(None), title: str = Form(None),
                   url: str = Form(None)):
    """
    Download one article detail page and return its raw html.

    :param proxy: optional proxy address, applied to both http and https
    :param periodDate: paper date, echoed back in the response
    :param title: article title, echoed back in the response
    :param url: article url to download
    :return: ReturnInfo dict with the page html in ``data["html"]``
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "www.81.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        # timeout added so a dead proxy/server cannot hang the worker forever
        r = requests.get(url=url, headers=headers, proxies=proxies, timeout=30)
        # pages on this site vary in declared encoding; trust detection
        r.encoding = r.apparent_encoding
    except Exception:
        # BUG FIX: the original read r.status_code here, but ``r`` is unbound
        # when requests.get raises -> UnboundLocalError on any network error.
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    htmlText = r.text
    # Soft-404: the site answers 200 with a "page missing/deleted" body.
    if htmlText.find("页面不存在") != -1 or htmlText.find("已删除") != -1:
        rtn.status = FAILED
        rtn.msg_code = MsgCode.NO_RESOURCE
        rtn.msg = "资源不存在"
        return rtn.todict()
    if htmlText.find("APP-PreTitle") < 0 and htmlText.find("content") < 0:
        if len(htmlText.strip()) == 0:
            rtn.status = FAILED
            rtn.msg_code = MsgCode.PAGE_BLANK
            rtn.msg = "资源不存在"
            return rtn.todict()
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    # CONSISTENCY FIX: every path now returns rtn.todict(); the original
    # returned the raw ReturnInfo object on error paths only.
    rtn.data = {
        "periodDate": periodDate,
        "title": title,
        "url": url,
        "html": htmlText,
    }
    return rtn.todict()

@router.post("/save_page")
async def parse_detail(paperjiefangjunbao: PaperJieFangJunBaoModel):
    """Upsert one paper record keyed by ``cid``.

    On update, the stored ``jsondicts`` field is deliberately left untouched
    (only the insert path writes it); on insert, a full document is created.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    # Fields copied verbatim from the request model on both paths.
    shared_fields = ("years", "moths", "days", "days_url", "stat",
                     "stathtml", "versions_title", "versions_url")
    existing = await col_paper.find_one({"_id": paperjiefangjunbao.cid})
    if existing:
        for field in shared_fields:
            existing[field] = getattr(paperjiefangjunbao, field)
        existing["update_time"] = datetime.datetime.now()
        await col_paper.replace_one({'_id': paperjiefangjunbao.cid}, existing)
        return_info.status = SUCCESS
        return_info.data = {"boos": existing["_id"]}
        return return_info.todict()
    doc = {"_id": paperjiefangjunbao.cid}
    for field in shared_fields:
        doc[field] = getattr(paperjiefangjunbao, field)
    doc["jsondicts"] = paperjiefangjunbao.jsondicts
    doc["update_time"] = datetime.datetime.now()
    doc["create_time"] = datetime.datetime.now()
    result = await col_paper.insert_one(doc)
    return_info.status = SUCCESS
    return_info.data = {"boos": repr(result.inserted_id)}
    return return_info.todict()


@router.post("/count_days")
async def parse_detail(paperjiefangjunbao: PaperJieFangJunBaoModel):
    """Count stored records (capped at 100) whose ``days`` matches the request."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    docs = await col_paper.find({"days": paperjiefangjunbao.days}).to_list(length=100)
    if not docs:
        return_info.status = FAILED
        return_info.msg = "查询不存在"
        return return_info.todict()
    return_info.status = SUCCESS
    return_info.data = {"count": len(docs)}
    return return_info.todict()


@router.get("/select_url")
async def parse_detail(count: Optional[int] = Query(10, le=1000)):
    """Return up to *count* records still waiting to be crawled (``stat`` == 0)."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    docs = await col_paper.find({"stat": 0}).to_list(length=count)
    if not docs:
        return_info.status = FAILED
        return_info.msg = "无状态为0的数据"
        return_info.data = []
        return return_info.todict()
    return_info.status = SUCCESS
    return_info.data = [
        {"id": str(doc["_id"]),
         "url": doc["versions_url"],
         "title": doc["versions_title"],
         "days": doc["days"]}
        for doc in docs[:count]
    ]
    return return_info.todict()


@router.post("/update_dicts")
async def parse_detail(cid: str, jsondicts: dict, is_update_all: bool = False):
    """
    Update the ``jsondicts`` field of one document.

    :param cid: document ``_id``
    :param jsondicts: incoming article dict; the ``"count"`` key is metadata
    :param is_update_all: when True, replace ``jsondicts`` wholesale; when
        False, carry over the already-downloaded ``stat``/``html``/``down_date``
        of any article whose url is already stored before replacing.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    doc = await col_paper.find_one({"_id": cid})
    if not doc:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    if not is_update_all:
        stored = doc["jsondicts"]
        # Map each stored article url back to its key so the downloaded
        # state can be copied into the incoming dict.
        url_to_key = {v["url"]: k for k, v in stored.items() if k != "count"}
        for key, entry in jsondicts.items():
            if key == "count":
                continue
            stored_key = url_to_key.get(entry["url"])
            if stored_key is not None:
                jsondicts[key]["stat"] = stored[stored_key]["stat"]
                jsondicts[key]["html"] = stored[stored_key]["html"]
                jsondicts[key]["down_date"] = stored[stored_key]["down_date"]
    doc["jsondicts"] = jsondicts
    doc["update_time"] = datetime.datetime.now()
    await col_paper.replace_one({'_id': cid}, doc)
    return_info.status = SUCCESS
    return_info.data = str(doc['_id'])
    return return_info.todict()


@router.get("/update_stat")
async def parse_detail(cid: str, stat: int):
    """Set the ``stat`` field of the document identified by *cid*."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    doc = await col_paper.find_one({"_id": cid})
    if not doc:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    doc["stat"] = stat
    doc["update_time"] = datetime.datetime.now()
    await col_paper.replace_one({'_id': cid}, doc)
    return_info.status = SUCCESS
    return_info.data = str(doc['_id'])
    return return_info.todict()


@router.get("/select_html_url")
async def parse_detail(stat: int, stathtml: int, count: int = 10):
    """Return up to *count* documents matching both ``stat`` and ``stathtml``,
    each reduced to its id and ``jsondicts`` payload."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    docs = await col_paper.find({"stat": stat, "stathtml": stathtml}).to_list(length=count)
    if not docs:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    return_info.status = SUCCESS
    return_info.data = [
        {"cid": doc["_id"], "dicts": doc["jsondicts"]}
        for doc in docs[:count]
    ]
    return return_info.todict()


@router.post("/update_html")
async def parse_detail(updatehtml: UpdateHtml):
    """Store a downloaded article html inside ``jsondicts[keyid]`` and mark
    that entry as done (``stat`` = 1)."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    doc = await col_paper.find_one({"_id": updatehtml.cid})
    if not doc:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    entry = doc["jsondicts"][updatehtml.keyid]
    entry["html"] = updatehtml.html
    entry["down_date"] = updatehtml.down_date
    entry["stat"] = 1  # 1 = article html downloaded
    doc["update_time"] = datetime.datetime.now()
    await col_paper.replace_one({'_id': updatehtml.cid}, doc)
    return_info.status = SUCCESS
    return_info.data = doc["_id"]
    return return_info.todict()


@router.get("/update_stathtml")
async def parse_detail(cid: str, stathtml: int):
    """
    Set the ``stathtml`` flag of the document identified by *cid*.

    BUG FIX: the original tested ``result.upserted_id``, which is only set
    when an upsert inserts a new document.  This update never upserts, so
    ``upserted_id`` was always None and even successful updates reported
    failure.  ``matched_count`` is the correct success signal here.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    result = await col_paper.update_one(
        {"_id": cid},
        {"$set": {"stathtml": stathtml, "update_time": datetime.datetime.now()}})
    if result.matched_count > 0:
        return_info.status = SUCCESS
        return_info.data = cid
        return return_info.todict()
    return_info.status = FAILED
    return_info.msg = "无该id的数据"
    return_info.data = []
    return return_info.todict()


@router.get("/update_html_stat")
async def parse_detail(cid: str, keyid: str, stat: int):
    """
    Set ``jsondicts[keyid].stat`` of the document identified by *cid*.

    BUG FIXES:
    - The original was the only synchronous handler calling this collection;
      every sibling awaits the same object, so the un-awaited ``update_one``
      presumably returned a coroutine that never ran (assumes an async Mongo
      driver -- TODO confirm against MongoDBClient).
    - The original tested ``result.upserted_id``, which stays None for a
      non-upsert update, so success was reported as failure; check
      ``matched_count`` instead.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_jiefangjunbao
    result = await col_paper.update_one(
        {"_id": cid},
        {"$set": {"jsondicts.{}.stat".format(keyid): stat,
                  "update_time": datetime.datetime.now()}})
    if result.matched_count > 0:
        return_info.status = SUCCESS
        return_info.data = cid
        return return_info.todict()
    return_info.status = FAILED
    return_info.msg = "无该id的数据"
    return_info.data = []
    return return_info.todict()


@router.post("/parse_detail")
def parse_detail(*, djson: str = Form(None)):
    """
    Parse an article detail page into a structured record.

    :param djson: JSON string containing at least ``html``, ``versions_title``,
        ``index``, ``days``, ``years``, ``url``, ``title`` and ``down_date``
        (the payload produced by the download endpoints above)
    :return: ReturnInfo dict whose ``data`` is the parsed article record
    """
    rtn = ReturnInfo()
    djson = json.loads(djson)
    title_catalyst = ""
    title_alt = ""
    abstract = ""
    authors = ""
    td_raw = ""
    if djson["html"] != "":
        sel = Selector(text=djson["html"])
        # New template: pre-title / subtitle / author live in <span>s in the h1.
        span_title = sel.xpath("//div[@class='content']/h1/span")
        if checkExist(span_title):
            td_raw = sel.xpath("//div[@class='content']").extract()[0]
            for i, span in enumerate(span_title):
                txt = span.xpath("./text()").extract()
                if txt:
                    if i == 0:
                        title_catalyst = txt[0].strip()
                    elif i == 1:
                        title_alt = txt[0].strip()
                    elif i == 2:
                        authors = txt[0].strip()
        else:
            # Legacy template: dedicated APP-* paragraphs.
            td_raw = sel.xpath("//div[contains(@class,'article-box')]").extract()[0]
            p_ctitle = sel.xpath("//p[@id='APP-PreTitle']/text()")
            if checkExist(p_ctitle):
                title_catalyst = p_ctitle.extract()[0].strip()
            p_alttitle = sel.xpath("//p[@id='APP-Subtitle']/text()")
            if checkExist(p_alttitle):
                title_alt = p_alttitle.extract()[0].strip()
            p_au = sel.xpath("//p[@id='APP-Author']/text()")
            if checkExist(p_au):
                authors = p_au.extract()[0].strip().replace("■", "")
        p_content = sel.xpath("//founder-content//p")
        if checkExist(p_content):
            # One line per text node; join instead of repeated += (quadratic).
            lines = []
            for p in p_content:
                for txt in p.xpath("./text()").extract():
                    lines.append(txt.strip() + "\n")
            abstract = "".join(lines)
    # Page number, extracted from a versions_title such as "第3版".
    tmp = re.findall(r"\d+\.?\d*", djson["versions_title"])
    if not checkExist(tmp):
        rtn.status = FAILED
        rtn.msg = "版面数据错误"
        return rtn.todict()
    version = tmp[0]
    # BUG FIX: ``index`` may arrive as an int (get_article_link emits i + 1),
    # and len() on an int raised TypeError.  Normalise to a zero-padded
    # string, matching the original "prepend 0 if single digit" intent.
    index = str(djson["index"]).zfill(2)
    rtn.status = SUCCESS
    rtn.msg = "解析成功"
    rtn.data = {
        "lngid_alt": "JFJB{}{}{}".format(djson["days"], version, index),
        "journal_name": "解放军报",
        'provider_url': djson["url"],
        'title': djson["title"],
        'title_catalyst': title_catalyst,
        'title_alt': title_alt,
        "authors": authors,
        "abstract": abstract,
        'pub_year': djson["years"],
        'pub_date': djson["days"],
        'versions_title': version,
        'index': djson["index"],
        "country": "CN",
        "td_raw": td_raw,
        "language": "ZH",
        "web_site": "http://www.81.cn",
        'down_date': djson["down_date"],
    }
    return rtn.todict()