# -*- coding: utf-8 -*-
# @Time    : 2020/5/21 10:12
# @Author  :
# @File    : down_routers_old.py
# @Software: PyCharm
import datetime
import json
import re
import traceback

from bs4 import BeautifulSoup
from fastapi import Query
from typing import Optional

import requests
from fastapi import APIRouter, Form
from parsel import Selector

# from app.allsubdb.paper_beijingribao.models import PaperBeiJingRiBaoModel, UpdateHtml
from apps.allsubdb.paper_beijingribao.models import PaperBeiJingRiBaoModel, UpdateHtml, PaperBeiJingRiBaoForm, \
    PaperBeiJingRiBaoArticle, PaperBeiJingRiBaoParse
from apps.core.return_info import ReturnInfo, SUCCESS, FAILED, InputInfoModel
# from app.sql_app.mmongodb import MongoDBClient
from re_common.baselibrary.utils.core.requests_core import MsgCode

from apps.sql_app.mmongodb import MongoDBClient

router = APIRouter()


def checkExist(obj):
    """Return True when *obj* is non-None and non-empty.

    Accepts anything with a ``len()`` (list, str, dict, SelectorList, ...).

    :param obj: container to test, possibly None
    :return: bool
    """
    # `is not None` is the idiomatic identity test; `!= None` could invoke
    # a custom __ne__/__eq__.  Short-circuit keeps len() off None.
    return obj is not None and len(obj) > 0


@router.post("/get_page_link")
def get_page_link(BJRBcg: InputInfoModel[PaperBeiJingRiBaoForm]):
    """Return every edition (版面) title/link of the paper for one date.

    :param BJRBcg: wraps ``proxy`` (optional proxy url) and
        ``data.periodDate`` (paper date string, e.g. ``"20200805"``)
    :return: ``ReturnInfo`` dict; on success ``data`` is a list of edition
        documents, one per 版面, each carrying its article list under
        ``jsondicts``.
    """
    proxy = BJRBcg.proxy
    periodDate = BJRBcg.data.periodDate
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "bjrbdzb.bjd.com.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'

    }
    # Matches the "第N版" (edition N) marker inside a nav heading.
    rule = re.compile(r"第[0-9]*版")
    proxies = None
    if proxy:
        proxies = {
            'http': proxy,
            'https': proxy
        }
    # Mobile table-of-contents page for the whole day; hoisted because the
    # edition anchors below reuse it.
    day_url = r"https://bjrbdzb.bjd.com.cn/bjrb/mobile/{}/{}/{}_m.html".format(
        periodDate[0:4], periodDate, periodDate)
    try:
        # ROBUSTNESS: timeout added so a dead proxy cannot hang the worker.
        r = requests.get(url=day_url, headers=headers, proxies=proxies, timeout=60)
        r.encoding = r.apparent_encoding
    except requests.RequestException:
        # BUG FIX: the original handler read r.status_code here, but when
        # requests.get itself raises, `r` was never bound -> NameError hid
        # the real error.  Also narrowed from a bare `except`.
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        # CONSISTENCY: always return the todict() form, like the success path.
        return rtn.todict()
    if r.status_code != 200:
        if r.status_code == 404 and r.text and r.text.find("找不到文件或目录") != -1:
            rtn.msg_code = r.status_code
            rtn.status = MsgCode.NO_RESOURCE
            rtn.msg = "网页明确无资源"
            return rtn.todict()
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()

    sel = Selector(text=r.text)
    list_one = list()

    # One nav-panel div per edition; each contains the edition heading and
    # the <ul> of article links.
    for binfo in sel.xpath("//div[@class='nav-panel-primary']/div"):
        title_forward = binfo.xpath('./div/text()').extract()[0].strip()
        # Edition title without the "第N版" marker.
        versions_title_forward = rule.sub("", title_forward).strip()
        pages = rule.findall(title_forward)
        if not pages:
            # ROBUSTNESS: skip a heading with no "第N版" marker instead of
            # crashing with IndexError on the whole request.
            continue
        page = pages[0].replace("第", '').replace("版", '')
        versions_title = "({})".format(page.zfill(2)) + versions_title_forward
        # Edition url: page anchors on the site are 0-based.
        versions_url = day_url + "#page" + str(int(page) - 1)
        cid = periodDate + "page" + str(int(page))
        urls_tag = binfo.xpath("./ul/li/a")
        dic_detail = dict()
        dic_detail["count"] = len(urls_tag)
        title = ""
        for i, url2tag in enumerate(urls_tag, start=1):
            # Single-article title; when an <a> has no text the previous
            # article's title carries over (behavior preserved from the
            # original implementation).
            title_x = url2tag.xpath("./text()").extract()
            if len(title_x) != 0:
                title = title_x[0].strip()
            url3 = url2tag.xpath("./@data-href").extract()[0].strip().strip(".")
            # Single-article absolute url.
            url_real = "https://bjrbdzb.bjd.com.cn/bjrb/mobile/{}/{}".format(periodDate[0:4], periodDate) + url3
            dic_detail[i] = {
                "title": title,
                "url": url_real,
                "stat": 0,
                "html": '',
                "down_date": ''
            }

        list_one.append({
            "cid": cid,
            "years": periodDate[0:4],
            "days": periodDate,
            "stat": 1,
            "stathtml": 0,
            "versions_title": versions_title,
            "versions_url": versions_url,
            "periodDate": periodDate,
            "jsondicts": dic_detail
        })

    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = list_one
    return rtn.todict()


@router.post("/save_page")
async def parse_detail(BJRBcg: InputInfoModel[PaperBeiJingRiBaoModel]):
    """Upsert the edition document identified by ``data.cid``.

    If a document with that ``_id`` already exists it is replaced with the
    submitted metadata fields (``jsondicts`` is left untouched); otherwise a
    new document, including ``jsondicts`` and ``create_time``, is inserted.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    data = BJRBcg.data
    now = datetime.datetime.now()
    # Metadata fields common to both the replace and the insert path.
    meta = {
        "years": data.years,
        "moths": data.moths,
        "days": data.days,
        "days_url": data.days_url,
        "stat": data.stat,
        "stathtml": data.stathtml,
        "versions_title": data.versions_title,
        "versions_url": data.versions_url,
    }

    existing = await col_paper.find_one({"_id": data.cid})
    if existing:
        existing.update(meta)
        existing["update_time"] = now
        await col_paper.replace_one({'_id': data.cid}, existing)
        return_info.status = SUCCESS
        return_info.data = {"boos": existing["_id"]}
        return return_info.todict()

    doc = dict(meta)
    doc["_id"] = data.cid
    doc["jsondicts"] = data.jsondicts
    doc["update_time"] = now
    doc["create_time"] = now
    result = await col_paper.insert_one(doc)
    return_info.status = SUCCESS
    return_info.data = {"boos": repr(result.inserted_id)}
    return return_info.todict()


@router.post("/count_days")
async def parse_detail(BJRBcg: InputInfoModel[PaperBeiJingRiBaoModel]):
    """Count stored edition documents for ``data.days`` (capped at 100)."""
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    docs = await col_paper.find({"days": BJRBcg.data.days}).to_list(length=100)
    if not docs:
        return_info.status = FAILED
        return_info.msg = "查询不存在"
        return return_info.todict()
    return_info.status = SUCCESS
    return_info.data = {"count": len(docs)}
    return return_info.todict()



@router.get("/select_html_url")
async def parse_detail(stat: int, stathtml: int, count: int = 10):
    """Fetch up to ``count`` documents matching ``stat``/``stathtml``.

    :param stat: page-level download status to match
    :param stathtml: html-download status to match
    :param count: maximum number of documents to return
    :return: list of ``{"cid": ..., "dicts": ...}`` entries, or FAILED when
        nothing matches.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    docs = await col_paper.find({"stat": stat, "stathtml": stathtml}).to_list(length=count)
    if not docs:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    # to_list(length=count) already caps the result; the slice is a belt-and-
    # braces equivalent of the original manual counter/break.
    return_info.status = SUCCESS
    return_info.data = [{"cid": d["_id"], "dicts": d["jsondicts"]} for d in docs[:count]]
    return return_info.todict()


@router.get("/update_stathtml")
async def parse_detail(cid: str, stathtml: int):
    """Set the ``stathtml`` flag (and ``update_time``) on document ``cid``.

    :param cid: document ``_id``
    :param stathtml: new html-download status value
    :return: SUCCESS with the cid when the document exists, FAILED otherwise.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    result = await col_paper.update_one(
        {"_id": cid},
        {"$set": {"stathtml": stathtml, "update_time": datetime.datetime.now()}})
    # BUG FIX: `upserted_id` is only populated when upsert=True inserts a new
    # document; for a plain update it is always None, so the original code
    # reported FAILED even after a successful update.  `matched_count` tells
    # whether the _id exists.
    if result.matched_count:
        return_info.status = SUCCESS
        return_info.data = cid
        return return_info.todict()
    return_info.status = FAILED
    return_info.msg = "无该id的数据"
    return_info.data = []
    return return_info.todict()


@router.post("/article_detail")
def article_detail(BJRBcg: InputInfoModel[PaperBeiJingRiBaoArticle]):
    """Download one article page and return its raw html.

    :param BJRBcg: wraps ``proxy`` (optional proxy url) and ``data`` with
        ``periodDate`` (paper date), ``title`` (article title) and ``url``
        (article link)
    :return: ``ReturnInfo`` dict; on success ``data`` echoes the inputs plus
        the downloaded ``html``.
    """
    rtn = ReturnInfo()
    proxy = BJRBcg.proxy
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "bjrbdzb.bjd.com.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'

    }
    proxies = None
    if proxy:
        proxies = {
            'http': proxy,
            'https': proxy
        }
    try:
        # ROBUSTNESS: timeout added so a dead proxy cannot hang the worker.
        r = requests.get(
            url=BJRBcg.data.url,
            headers=headers,
            proxies=proxies,
            timeout=60
        )
        r.encoding = r.apparent_encoding
    except requests.RequestException:
        # BUG FIX: the original handler read r.status_code here, but when
        # requests.get itself raises, `r` was never bound -> NameError hid
        # the real error.  Also narrowed from a bare `except`.
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        # CONSISTENCY: always return the todict() form, like the success path.
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "返回状态码错误"
        return rtn.todict()
    htmlText = r.text
    # Site-specific "article was removed" markers inside a 200 response.
    if htmlText.find("页面不存在") != -1 or htmlText.find("资源可能已被删除") != -1:
        rtn.status = FAILED
        rtn.msg_code = MsgCode.NO_RESOURCE
        rtn.msg = "资源不存在"
        return rtn.todict()
    # Every valid article page carries the count-info block; its absence
    # means a blank page or an unexpected layout.
    if htmlText.find('class="article-count-info"') < 0:
        if len(htmlText.strip()) == 0:
            rtn.status = FAILED
            rtn.msg_code = MsgCode.PAGE_BLANK
            rtn.msg = "资源不存在"
            return rtn.todict()
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "下载成功"
    rtn.data = {
        "periodDate": BJRBcg.data.periodDate,
        "title": BJRBcg.data.title,
        "url": BJRBcg.data.url,
        "html": htmlText
    }
    return rtn.todict()


# NOTE(review): this route path duplicates the /update_stathtml handler
# defined earlier in this file; FastAPI will dispatch to whichever was
# registered first.  Kept as-is to preserve the public surface -- confirm
# whether one of the two should be removed or renamed.
@router.get("/update_stathtml")
async def parse_detail(cid: str, stathtml: int):
    """Set the ``stathtml`` flag (and ``update_time``) on document ``cid``.

    :param cid: document ``_id``
    :param stathtml: new html-download status value
    :return: SUCCESS with the cid when the document exists, FAILED otherwise.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    # BUG FIX: the original did not await update_one and then wrote
    # `await cjip.upserted_id`, i.e. awaited an attribute of a coroutine,
    # which raises AttributeError at runtime.  Await the call itself.
    result = await col_paper.update_one(
        {"_id": cid},
        {"$set": {"stathtml": stathtml, "update_time": datetime.datetime.now()}})
    # BUG FIX: `upserted_id` is None for a plain (non-upsert) update, so the
    # success branch was unreachable; use `matched_count` instead.
    if result.matched_count:
        return_info.status = SUCCESS
        return_info.data = cid
        return return_info.todict()
    return_info.status = FAILED
    return_info.msg = "无该id的数据"
    return_info.data = []
    return return_info.todict()


@router.post("/update_html")
async def parse_detail(BJRBcg: InputInfoModel[UpdateHtml]):
    """Store downloaded article html inside ``jsondicts[keyid]`` of ``cid``.

    :param BJRBcg: wraps ``data.cid`` (document _id), ``data.keyid``
        (article slot inside ``jsondicts``), ``data.html`` and
        ``data.down_date``
    :return: SUCCESS with the cid, or FAILED when the document or slot is
        missing.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    cjip = await col_paper.find_one({"_id": BJRBcg.data.cid})
    if not cjip:
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    jsondicts = cjip["jsondicts"]
    if BJRBcg.data.keyid not in jsondicts:
        # ROBUSTNESS: previously an unknown keyid raised KeyError and the
        # request failed with HTTP 500; report a normal FAILED result.
        return_info.status = FAILED
        return_info.msg = "无该id的数据"
        return_info.data = []
        return return_info.todict()
    entry = jsondicts[BJRBcg.data.keyid]
    entry["html"] = BJRBcg.data.html
    entry["down_date"] = BJRBcg.data.down_date
    entry["stat"] = 1
    cjip["update_time"] = datetime.datetime.now()
    await col_paper.replace_one({'_id': BJRBcg.data.cid}, cjip)
    return_info.status = SUCCESS
    return_info.data = cjip["_id"]
    return return_info.todict()


@router.get("/update_html_stat")
async def parse_detail(cid: str, keyid: str, stat: int):
    """Set ``jsondicts.<keyid>.stat`` (and ``update_time``) on document ``cid``.

    :param cid: document ``_id``
    :param keyid: article slot inside ``jsondicts``
    :param stat: new per-article status value
    :return: SUCCESS with the cid when the document exists, FAILED otherwise.
    """
    return_info = ReturnInfo()
    col_paper = MongoDBClient.db30_1.client.htmljson.paper_beijingribao_test
    # BUG FIX: the original was a sync `def` calling the async driver's
    # update_one without await, so `cjip` was a Future and reading
    # `.upserted_id` failed at runtime.  Made the handler async and awaited
    # the call (FastAPI supports both handler kinds).
    result = await col_paper.update_one({"_id": cid}, {
        "$set": {"jsondicts.{}.stat".format(keyid): stat, "update_time": datetime.datetime.now()}})
    # BUG FIX: `upserted_id` is None for a plain (non-upsert) update, so the
    # success branch was unreachable; use `matched_count` instead.
    if result.matched_count:
        return_info.status = SUCCESS
        return_info.data = cid
        return return_info.todict()
    return_info.status = FAILED
    return_info.msg = "无该id的数据"
    return_info.data = []
    return return_info.todict()


@router.post("/parse_detail")
def parse_detail(BJRBcg: InputInfoModel[PaperBeiJingRiBaoParse]):
    """
       Parse a downloaded article detail page into a structured record.

       ``BJRBcg.data.djson`` is a JSON string; the reads below require the
       keys ``html``, ``versions_title``, ``index``, ``days``, ``years``,
       ``url`` and ``down_date``.
       :return: ReturnInfo dict whose ``data`` holds the parsed article
                fields, or FAILED when no edition number can be extracted
                from ``versions_title``.
    """
    rtn = ReturnInfo()
    djson = json.loads(BJRBcg.data.djson)
    # Parsed fields default to "" when html is empty or an element is absent.
    title_catalyst = ""
    title_alt = ""
    abstract = ""
    authors = ""
    td_raw = ""
    title = ""
    if djson["html"] != "":
        sel = Selector(text=djson["html"])
        # Kicker / guide title shown above the headline.
        h2_guidetitle = sel.xpath("//font[@id = 'guide']")
        if checkExist(h2_guidetitle):
            title_catalyst_list = h2_guidetitle.xpath("./text()").extract()
            title_catalyst = ''.join(title_catalyst_list)
        # Main headline.
        h2_title = sel.xpath("//font[@id = 'main-title']/b")
        if checkExist(h2_title):
            title_list = h2_title.xpath("./text()").extract()
            title = ''.join(title_list)
        # Subtitle shown below the headline.
        h2_alttitle = sel.xpath("//font[@id = 'sub-title']")
        if checkExist(h2_alttitle):
            title_alt_list = h2_alttitle.xpath("./text()").extract()
            title_alt = ''.join(title_alt_list)
        # Body paragraphs: take the first <span> text of a paragraph when one
        # exists, otherwise the paragraph's own first text node; each
        # contributes one newline-terminated line of the abstract.
        p_info = sel.xpath("//div[contains(@class,'content my-gallery')]/p")
        for p in p_info:
            pkb = p.xpath("./span")
            if len(pkb) > 0:
                span = pkb.xpath("./text()").extract()
                if len(span) > 0:
                    span_txt = span[0].strip()
                    abstract = abstract + span_txt + "\n"
            else:
                p_txt = p.xpath("./text()").extract()
                if len(p_txt) > 0:
                    p_text = p_txt[0].strip()
                    abstract = abstract + p_text + "\n"
        # Byline: strip reporter/correspondent role prefixes.  NOTE(review):
        # each iteration overwrites `authors`, so only the last non-empty div
        # of the count-info block is kept -- confirm that is intended.
        span_info = sel.xpath("//div[contains(@class,'article-count-info')]/div")
        for span in span_info:
            txt = span.xpath("./text()").extract()
            if txt:
                txt = txt[0].strip()
                authors = txt.replace("本报记者", "").replace("新华社记者", "").replace("记者", "").replace("通讯员", "").replace(
                    "  ", " ").strip()
        # Raw html of the article body.  NOTE(review): [0] raises IndexError
        # if the content div is missing from a non-empty page -- assumed to
        # always exist; confirm against real pages.
        td_raw = sel.xpath("//div[contains(@class,'content my-gallery')]").extract()[0].strip()

    rtn.status = SUCCESS
    rtn.msg = "解析成功"
    # Edition number: first integer found in versions_title,
    # e.g. "(02)要闻" -> "02".
    version = ""
    tmp = re.findall(r"\d+\.?\d*", djson["versions_title"])
    if checkExist(tmp):
        version = tmp[0]
    else:
        rtn.status = FAILED
        rtn.msg = "版面数据错误"
        return rtn.todict()
    # Zero-pad the article index to two digits for the lngid_alt (assumes
    # djson["index"] is a string -- TODO confirm with the caller).
    index = djson["index"]
    if len(index) == 1:
        index = "0" + index
    dic = {
        "lngid_alt": "BJRB{}{}{}".format(djson["days"], version, index),
        "journal_name": "北京日报",
        'provider_url': djson["url"],
        'title': title,
        'title_catalyst': title_catalyst,
        'title_alt': title_alt,
        "authors": authors,
        "abstract": abstract,
        'pub_year': djson["years"],
        'pub_date': djson["days"],
        'versions_title': version,
        'index': djson["index"],
        "country": "CN",
        "td_raw": td_raw,
        "language": "ZH",
        "web_site": "https://www.bjd.com.cn/",
        'down_date': djson["down_date"]
    }
    rtn.data = dic
    return rtn.todict()
