# -*- coding: utf-8 -*-
# @Time    : 2020/5/21 10:12
# @Author  :
# @File    : down_routers.py
# @Software: PyCharm
import datetime
import json
import re
import traceback
from parsel import Selector
import requests
from fastapi import APIRouter, Form

from app.allsubdb.paper_jingjiribao.models import PaperJinjiribao, PaperJinjiribaoModel, UpdateHtml
from app.core.return_info import ReturnInfo, SUCCESS, FAILED, MsgCode

router = APIRouter()


def checkExist(obj):
    """Return True if *obj* is a non-None, non-empty sized object.

    Works for lists, strings and parsel SelectorLists alike — anything
    supporting len().
    """
    # `is not None` (identity) instead of `!= None`; the boolean expression
    # replaces the verbose if/else that returned literal True/False.
    return obj is not None and len(obj) > 0


@router.post("/get_day_link")
def get_day_link(*, proxy: str = Form(None), years: str = Form(None), months: str = Form(None)):
    """
    Economic Daily: return the front-page link of every day in a given month.

    :param proxy: optional http(s) proxy address
    :param years: year, e.g. "2008"
    :param months: month, "1" or "01" (single digits are zero-padded)
    :return: ReturnInfo dict; data holds {"years", "months", "count", "dayLink"}
    """
    rtn = ReturnInfo()
    # Zero-pad a single-digit month; the truthiness guard avoids a TypeError
    # on len(None) when the form field is omitted.
    if months and len(months) == 1:
        months = "0" + months
    headers = {
        "Accept": "text/javascript, text/html, application/xml, text/xml, */*",
        "Host": "paper.ce.cn",
        "X-Requested-With": "XMLHttpRequest",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        r = requests.get(
            url="http://paper.ce.cn/jjrb/html/{}-{}/period.xml".format(years, months),
            headers=headers,
            proxies=proxies,
            timeout=30,  # requests has no default timeout; never hang forever
        )
    except requests.RequestException:  # narrowed from a bare except
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()  # return a dict on every path, like the success path
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip the UTF-8 BOM
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    if htmlText.find("periodlist") < 0:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    sel = Selector(text=htmlText)
    list_period = sel.xpath("//periodlist/period")
    if len(list_period) == 0:
        rtn.status = FAILED
        rtn.msg = "当前月份无报纸链接"
        return rtn.todict()
    list_link = []
    for per in list_period:
        # period_date is "YYYY-MM-DD"; front_page is the first page's filename.
        periodDate = per.xpath("./period_date/text()").extract()[0].strip()
        ymd = periodDate.split("-")
        list_link.append({
            "periodDate": periodDate.replace("-", ""),
            "url": "http://paper.ce.cn/jjrb/html/{}-{}/{}/".format(ymd[0], ymd[1], ymd[2]) +
                   per.xpath("./front_page/text()").extract()[0].strip()
        })
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = {
        "years": years,
        "months": months,
        "count": len(list_link),
        "dayLink": list_link
    }
    return rtn.todict()


@router.post("/get_page_link")
def get_page_link(*, proxy: str = Form(None), periodDate: str = Form(None), url: str = Form(None)):
    """
    Return the title and link of every page (版面) of one day's paper.

    :param proxy: optional http(s) proxy address
    :param periodDate: paper date, e.g. "20200805"
    :param url: front-page url of the day's paper
    :return: ReturnInfo dict; data holds {"periodDate", "count", "pageLink"}
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "paper.ce.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        r = requests.get(
            url=url,
            headers=headers,
            proxies=proxies,
            timeout=30,  # requests has no default timeout; never hang forever
        )
    except requests.RequestException:  # narrowed from a bare except
        print(traceback.format_exc())
        # BUGFIX: the old handler read r.status_code here, but r is unbound
        # when requests.get itself raised, turning every network error into
        # a NameError instead of the intended FAILED response.
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip the UTF-8 BOM
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    if htmlText.find("第01版") < 0:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    sel = Selector(text=htmlText)
    # The first scrollable <div> on the page holds the page (版面) navigation.
    list_a = sel.xpath("//div[contains(@style,'overflow-y:scroll')]")[0].xpath("./table/tbody/tr/td/a")
    if len(list_a) == 0:
        rtn.status = FAILED
        rtn.msg = "当前日期无版面链接"
        return rtn.todict()
    list_link = []
    for ainfo in list_a:
        title = ainfo.xpath("./text()").extract()[0].strip()
        if len(title) == 0:
            continue
        list_link.append({
            "title": title,
            "url": "http://paper.ce.cn/jjrb/html/{}-{}/{}/".format(periodDate[0:4], periodDate[4:6], periodDate[6:8]) +
                   ainfo.xpath("./@href").extract()[0].strip()
        })
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = {
        "periodDate": periodDate,
        "count": len(list_link),
        "pageLink": list_link
    }
    return rtn.todict()


@router.post("/get_article_link")
def get_article_link(*, proxy: str = Form(None), periodDate: str = Form(...), pageTitle: str = Form(None),
                     url: str = Form(...)):
    """
    Return the title and link of every article on one page (版面).

    :param proxy: optional http(s) proxy address
    :param periodDate: paper date, e.g. "20200805"
    :param pageTitle: page title, echoed back in the response
    :param url: page url
    :return: ReturnInfo dict; data holds {"periodDate", "pagename", "count", "articleLink"}
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "paper.ce.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        r = requests.get(
            url=url,
            headers=headers,
            proxies=proxies,
            timeout=30,  # requests has no default timeout; never hang forever
        )
    except requests.RequestException:  # narrowed from a bare except
        print(traceback.format_exc())
        rtn.msg_code = ""
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip the UTF-8 BOM
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    if len(htmlText) == 0:  # already stripped above
        rtn.status = FAILED
        rtn.msg_code = MsgCode.PAGE_BLANK
        rtn.msg = "资源不存在"
        return rtn.todict()
    if htmlText.find("第01版") < 0:
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    sel = Selector(text=htmlText)
    # The SECOND scrollable <div> holds the article list (the first one holds
    # the page navigation — see get_page_link).
    list_a = sel.xpath("//div[contains(@style,'overflow-y:scroll')]")[1].xpath("./table/tbody/tr/td/a")
    if len(list_a) == 0:
        rtn.status = FAILED
        rtn.msg = "当前版面无文章链接"
        return rtn.todict()
    list_link = []
    for i, ainfo in enumerate(list_a):
        list_link.append({
            "index": i + 1,
            "title": ainfo.xpath("./div/text()").extract()[0].strip(),
            "url": "http://paper.ce.cn/jjrb/html/{}-{}/{}/".format(periodDate[0:4], periodDate[4:6], periodDate[6:8]) +
                   ainfo.xpath("./@href").extract()[0].strip()
        })
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = {
        "periodDate": periodDate,
        "pagename": pageTitle,
        "count": len(list_link),
        "articleLink": list_link
    }
    return rtn.todict()


@router.post("/article_detail")
def article_detail(*, proxy: str = Form(None), url: str = Form(None)):
    """
    Fetch one article page and return its raw html.

    :param proxy: optional http(s) proxy address
    :param url: article url
    :return: ReturnInfo dict; data holds {"url", "html"}
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "paper.ce.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    try:
        r = requests.get(
            url=url,
            headers=headers,
            proxies=proxies,
            timeout=30,  # requests has no default timeout; never hang forever
        )
        # Let requests sniff the real encoding from the body; these pages do
        # not always declare it in the response headers.
        r.encoding = r.apparent_encoding
    except requests.RequestException:  # narrowed from a bare except
        print(traceback.format_exc())
        rtn.msg_code = ""
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    htmlText = r.text
    # Soft-404: the site answers 200 with a "page missing / deleted" body.
    if htmlText.find("页面不存在") != -1 or htmlText.find("已删除") != -1:
        rtn.status = FAILED
        rtn.msg_code = MsgCode.NO_RESOURCE
        rtn.msg = "资源不存在"
        return rtn.todict()
    # "font01" is the css class of the article title — its absence means the
    # page is blank or not an article page at all.
    if htmlText.find("font01") < 0:
        if len(htmlText.strip()) == 0:
            rtn.status = FAILED
            rtn.msg_code = MsgCode.PAGE_BLANK
            rtn.msg = "资源不存在"
            return rtn.todict()
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = {
        "url": url,
        "html": htmlText
    }
    return rtn.todict()

@router.post("/save_page")
def parse_detail(paperjinjiribao: PaperJinjiribaoModel):
    """Upsert a page record keyed by cid.

    An existing record gets its metadata refreshed (jsondicts is left
    untouched); otherwise a new document is created with jsondicts included.
    """
    return_info = ReturnInfo()
    # Fields shared by both the update and the create path.
    common_fields = dict(
        cid=paperjinjiribao.cid,
        years=paperjinjiribao.years,
        moths=paperjinjiribao.moths,
        days=paperjinjiribao.days,
        days_url=paperjinjiribao.days_url,
        stat=paperjinjiribao.stat,
        stathtml=paperjinjiribao.stathtml,
        versions_title=paperjinjiribao.versions_title,
        versions_url=paperjinjiribao.versions_url,
    )
    existing = PaperJinjiribao.objects(cid=paperjinjiribao.cid)
    if existing:
        existing.update(update_time=datetime.datetime.now(), **common_fields)
        return_info.status = SUCCESS
        return_info.data = {"boos": [str(record.cid) for record in existing]}
        return return_info.todict()
    new_record = PaperJinjiribao(
        jsondicts=paperjinjiribao.jsondicts,
        update_time=datetime.datetime.now(),
        create_time=datetime.datetime.now(),
        **common_fields,
    )
    new_record.save()
    return_info.status = SUCCESS
    return_info.data = {"boos": str(new_record.id)}
    return return_info.todict()


@router.post("/count_days")
def parse_detail(paperjinjiribao: PaperJinjiribaoModel):
    """
    Return how many stored records match the given day.

    :param paperjinjiribao: request model; only its `days` field is used
    :return: ReturnInfo dict; data holds {"count": N}, or FAILED if no match
    """
    return_info = ReturnInfo()
    cjip = PaperJinjiribao.objects(days=paperjinjiribao.days)
    if cjip:
        return_info.status = SUCCESS
        # count() counts server-side; the old list comprehension fetched and
        # materialized every matching document just to take its len().
        return_info.data = {"count": cjip.count()}
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg = "查询不存在"
        return return_info.todict()


@router.get("/select_url")
def parse_detail(count: int = 10):
    """
    Return up to *count* records whose stat == 0 (not yet processed).

    :param count: maximum number of records to return
    :return: ReturnInfo dict; data is a list of {"id", "url", "title", "days"}
    """
    return_info = ReturnInfo()
    cjip = PaperJinjiribao.objects(stat=0)
    if cjip:
        return_info.status = SUCCESS
        lists = []
        for j in cjip:
            # Check BEFORE appending: the old append-then-check loop returned
            # one record even when count <= 0.
            if len(lists) >= count:
                break
            lists.append({"id": str(j.cid), "url": j.versions_url, "title": j.versions_title, "days": j.days})
        return_info.data = lists
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg = "无状态为0的数据"
        return_info.data = []
        return return_info.todict()


@router.post("/update_dicts")
def parse_detail(cid: str, jsondicts: dict, is_update_all: bool = False):
    """
    Update the record's jsondicts field.

    :param cid: id of the record to update
    :param jsondicts: new article dict; the special key "count" is metadata
                      and is skipped while merging
    :param is_update_all: True -> overwrite jsondicts wholesale;
                          False -> merge, preserving the already-stored
                          stat/html/down_date of articles with the same url
    :return: ReturnInfo dict with the cid, or FAILED when no record matches
    """
    return_info = ReturnInfo()
    cjip = PaperJinjiribao.objects(cid=cid)
    if cjip:
        cjip = cjip.first()
        if is_update_all:
            cjip.update(
                jsondicts=jsondicts,
                update_time=datetime.datetime.now())
        else:
            # Build url -> stored-key map so entries can be matched across the
            # two dicts even when their keys differ.
            is_have_dicts = {}
            jsondicts_mongo = cjip["jsondicts"]
            for k, v in jsondicts_mongo.items():
                if k == "count":
                    continue
                is_have_dicts[v["url"]] = k
            for k, v in jsondicts.items():
                if k == "count":
                    continue
                if v["url"] in is_have_dicts.keys():
                    # Same article already stored: carry its download state
                    # over so previously fetched html is not lost.
                    # NOTE(review): assumes every stored entry has stat/html/
                    # down_date keys — confirm against the writers of jsondicts.
                    mongo_k = is_have_dicts[v["url"]]
                    jsondicts[k]["stat"] = jsondicts_mongo[mongo_k]["stat"]
                    jsondicts[k]["html"] = jsondicts_mongo[mongo_k]["html"]
                    jsondicts[k]["down_date"] = jsondicts_mongo[mongo_k]["down_date"]

            cjip.update(
                jsondicts=jsondicts,
                update_time=datetime.datetime.now())
        return_info.status = SUCCESS
        return_info.data = str(cjip.cid)
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()


@router.get("/update_stat")
def parse_detail(cid: str, stat: int):
    """
    Set the top-level `stat` field of the record with the given cid.

    :param cid: id of the record to update
    :param stat: new stat value
    :return: ReturnInfo dict with the cid, or FAILED when no record matches
    """
    return_info = ReturnInfo()
    records = PaperJinjiribao.objects(cid=cid)
    if not records:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    record = records.first()
    record.update(stat=stat, update_time=datetime.datetime.now())
    return_info.status = SUCCESS
    return_info.data = str(record.cid)
    return return_info.todict()


@router.get("/update_html_stat")
def parse_detail(cid: str, keyid: str, stat: int):
    """
    Set jsondicts[keyid]["stat"] on the record with the given cid.

    :param cid: id of the record to update
    :param keyid: key of the jsondicts entry to touch
    :param stat: new per-article stat value
    :return: ReturnInfo dict with the cid, or FAILED when no record matches
    """
    return_info = ReturnInfo()
    records = PaperJinjiribao.objects(cid=cid)
    if not records:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    record = records.first()
    record.jsondicts[keyid]["stat"] = stat
    record.save()
    return_info.status = SUCCESS
    return_info.data = str(record.cid)
    return return_info.todict()


@router.get("/select_html_url")
def parse_detail(stat: int, stathtml: int, count: int = 10):
    """
    Return up to *count* records matching the given stat/stathtml flags.

    :param stat: page-level state to match
    :param stathtml: html download state to match
    :param count: maximum number of records to return
    :return: ReturnInfo dict; data is a list of {"cid", "dicts"}
    """
    return_info = ReturnInfo()
    cjip = PaperJinjiribao.objects(stat=stat, stathtml=stathtml)
    if cjip:
        lists = []
        for j in cjip:
            # Check BEFORE appending: the old append-then-check loop returned
            # one record even when count <= 0.
            if len(lists) >= count:
                break
            lists.append({"cid": j.cid, "dicts": j.jsondicts})
        return_info.status = SUCCESS
        return_info.data = lists
        return return_info.todict()
    else:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()


@router.post("/update_html")
def parse_detail(updatehtml: UpdateHtml):
    """
    Store downloaded article html into jsondicts[keyid] and mark it done.

    :param updatehtml: carries cid, keyid, html and down_date
    :return: ReturnInfo dict with the cid, or FAILED when no record matches
    """
    return_info = ReturnInfo()
    records = PaperJinjiribao.objects(cid=updatehtml.cid)
    if not records:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    record = records.first()
    entry = record.jsondicts[updatehtml.keyid]
    entry["html"] = updatehtml.html
    entry["down_date"] = updatehtml.down_date
    entry["stat"] = 1  # 1 marks the article html as downloaded
    record.update_time = datetime.datetime.now()
    record.save()
    return_info.status = SUCCESS
    return_info.data = record.cid
    return return_info.todict()


@router.get("/update_stathtml")
def parse_detail(cid: str, stathtml: int):
    """
    Set the top-level `stathtml` field of the record with the given cid.

    :param cid: id of the record to update
    :param stathtml: new stathtml value
    :return: ReturnInfo dict with the cid, or FAILED when no record matches
    """
    return_info = ReturnInfo()
    records = PaperJinjiribao.objects(cid=cid)
    if not records:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    record = records.first()
    record.update(stathtml=stathtml, update_time=datetime.datetime.now())
    return_info.status = SUCCESS
    return_info.data = str(record.cid)
    return return_info.todict()

def checkName(nameStr):
    """Return True if any space-separated token of *nameStr* is shorter
    than 5 characters (heuristic: short tokens look like person names).

    Note: "" yields one empty token, so the empty string returns True,
    matching the original behavior.
    """
    # str.split(" ") always yields at least one element, so the old
    # `if len(tmp) > 0` guard was dead code; any() replaces the flag loop.
    return any(len(name) < 5 for name in nameStr.split(" "))

def fkName(name):
    """Return True if *name* contains a role/title keyword (记者, 通讯员, ...)
    — i.e. it is a byline annotation rather than a plain person name."""
    rmlist = ["记者", "通讯员", "本报", "评论员", "董事长", "院长"]
    # any() with a generator replaces the manual flag-and-break loop.
    return any(rm in name for rm in rmlist)

@router.post("/parse_detail")
def parse_detail(*, djson: str = Form(None)):
    """
    Parse an article detail page into a structured record.

    :param djson: JSON string; usage below requires the keys "html", "title",
                  "url", "versions_title", "index", "days", "years",
                  "down_date" (shape inferred from usage — confirm callers).
    :return: ReturnInfo dict; data is the parsed article record,
             FAILED on layout/page-number parse errors.
    """
    rtn = ReturnInfo()
    djson = json.loads(djson)
    title_catalyst = ""  # kicker / introductory title (first font02 cell)
    title_alt = ""       # alternative (sub) title
    abstract = ""        # article body text, newline-joined
    authors = ""         # byline text, when detected
    sel = Selector(text=djson["html"])
    # Kicker / subtitle / byline lines are rendered as <td class="font02">;
    # more than 3 such cells means the page layout is not the expected one.
    td_talt = sel.xpath("//td[contains(@class,'font02')]")
    if len(td_talt) > 3:
        rtn.status = FAILED
        rtn.msg = "标题解析错误"
        return rtn.todict()
    if checkExist(td_talt):
        for i,td in enumerate(td_talt):
            txt = td.xpath("./text()").extract()
            if txt:
                txt = txt[0].strip()
            else:
                txt = ""
            if len(txt) > 0:
                # Cell 0 is always the kicker; cells 1/2 are classified as a
                # byline when short or containing a role keyword (fkName /
                # checkName heuristics), otherwise as the subtitle.
                if i == 0:
                    title_catalyst = txt
                elif i == 1:
                    if fkName(txt) or len(txt) < 5 or checkName(txt):
                        authors = txt
                    else:
                        title_alt = txt
                elif i == 2:
                    if fkName(txt) or len(txt) < 5 or checkName(txt):
                        authors = txt
                    else:
                        # Keep the earlier subtitle if cell 1 already set one.
                        if len(title_alt) == 0:
                            title_alt = txt
    # Body paragraphs live under a custom <founder-content> element.
    p_content = sel.xpath("//founder-content/p")
    if checkExist(p_content):
        for p in p_content:
            txt = p.xpath("./text()").extract()
            if txt:
                txt = txt[0].strip()
                abstract = abstract + txt + "\n"
    rtn.status = SUCCESS
    rtn.msg = "解析成功"
    version = ""
    # Extract the numeric page number from a title like "第01版：要闻".
    tmp = re.findall(r"\d+\.?\d*", djson["versions_title"])
    if checkExist(tmp):
        version = tmp[0]
    else:
        rtn.status = FAILED
        rtn.msg = "版面数据错误"
        return rtn.todict()
    # index is assumed to be a string here (len() / "0" + concatenation)
    # — TODO confirm against the producer of djson.
    index = djson["index"]
    if len(index) == 1:
        index = "0" + index
    dic = {
        "lngid_alt":"JJRB{}{}{}".format(djson["days"],version,index),
        "journal_name": "经济日报",
        'provider_url': djson["url"],
        'title': djson["title"],
        'title_catalyst': title_catalyst,
        'title_alt': title_alt,
        "authors":authors,
        "abstract":abstract,
        'pub_year': djson["years"],
        'pub_date': djson["days"],
        'versions_title': version,
        'index': djson["index"],
        "country":"CN",
        "td_raw": "".join(td_talt.extract()),
        "language":"ZH",
        # NOTE(review): "wwww" looks like a typo for "www" — left unchanged
        # because downstream consumers may match on this exact value; confirm.
        "web_site": "http://wwww.paper.ce.cn/",
        'down_date': djson["down_date"]
    }
    rtn.data = dic
    return rtn.todict()
