# -*- coding: utf-8 -*-
# Import the Flask class and supporting libraries

import requests
import re
import urllib
import sys
import json
import os
from imp import reload
from bs4 import BeautifulSoup
from flask import Flask, render_template, request
from flask_restful import Api, reqparse
from newspaper import Article
import WebSocket


# NOTE(review): the Python-2-era `reload(sys)` + setdefaultencoding hack was
# removed here — it has no effect on Python 3 and `imp` is deprecated.

# Instantiate the Flask application and its flask_restful API wrapper.
app = Flask(__name__)
api = Api(app)
parser = reqparse.RequestParser()
# Arguments must be registered on the parser before it can extract the 'url'
# parameter from GET/POST requests.
parser.add_argument('url', type=str, help='Rate to charge for this resource')


@app.route('/getarticle.php', methods=['POST'])
def Get_Article():
    """Extract the article at the POSTed 'url' form field and return it as JSON.

    Returns:
        A JSON string produced by GetArtiList_Json, or a 400 response when
        the 'url' form parameter is missing.
    """
    # The route only accepts POST, so the old `if request.method == "POST"`
    # guard was redundant — and when it failed the function silently returned
    # None, producing an opaque 500 error.
    post_url = request.form.get("url")
    if not post_url:
        return "missing 'url' parameter", 400
    return GetArtiList_Json(post_url)


@app.route('/post.html')
def post_html():
    """Serve the submission form rendered from templates/post.html."""
    return render_template('post.html')


@app.route('/websocket')
def Web_Socket():
    """Serve the websocket demo page rendered from templates/websocket.html."""
    return render_template('websocket.html')


def senderrorcode(code):
    """Placeholder for error-code reporting.

    Currently a no-op: the `code` argument is ignored and None is returned.
    """
    return None


def GetSearchUrlList(Keywordlist=None):
    """Search Baidu News for each keyword and return the URL lists as JSON.

    Args:
        Keywordlist: iterable of keyword strings; None is treated as empty.

    Returns:
        A JSON-encoded list containing one list of article URLs per keyword.
    """
    # Fix: the original used a mutable default argument (Keywordlist=[]),
    # which is shared across every call to the function.
    if Keywordlist is None:
        Keywordlist = []
    return json.dumps([GetBaiDuUrlList(tag) for tag in Keywordlist])


def GetBaiDuUrlList(TAG):  # Search Baidu News for a keyword and collect article URLs
    """Search Baidu News for TAG and return the list of article URLs found.

    Pages through the results 10 at a time until a page returns fewer than
    10 results. Returns [] when the result-count marker cannot be found.
    """
    # Browser-like headers. Fix: the header name was 'User-Agent:' (trailing
    # colon), which is not a valid header name and was silently ignored.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, compress',
        'Accept-Language': 'en-us;q=0.5,en;q=0.3',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'
    }
    url = ('http://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&rsv_dl=ns_pc&pn=0&word='
           + urllib.parse.quote_plus(TAG))
    # Fix: headers must be passed as a keyword argument; the original passed
    # the dict positionally, where requests.get treats it as `params`.
    response = requests.get(url, headers=headers, timeout=5)
    page_html = response.text
    # Total result count (e.g. '1,234') inside the <span class="nums"> element.
    match = re.search(r'<span class="nums">.+?((\d+,)*\d+).+?</span>', page_html)
    if match is None:
        # Fix: the original raised AttributeError when the count was absent.
        return []
    total_results = int(match.group(1).replace(",", ""))
    url_list = []
    page_index = 0
    while page_index <= total_results:
        page_url = ('http://www.baidu.com/s?rtt=4&bsst=1&cl=2&tn=news&rsv_dl=ns_pc&pn='
                    + str(page_index) + '&word=' + urllib.parse.quote_plus(TAG))
        result_html = requests.get(page_url, headers=headers, timeout=5).text
        # Narrow to the results container before handing the HTML to bs4.
        begin = result_html.find('<div id="content_left">')
        end = result_html.find('<div id="gotoPage">')
        soup = BeautifulSoup(result_html[begin:end], 'lxml')
        titles = soup.find_all("h3", class_="c-title")
        for title in titles:
            href = title.find('a').get('href')
            # Fix: the original tested `url not in URLLIST` where `url` was a
            # bs4 Tag and URLLIST held strings, so duplicates were never
            # detected; compare the extracted href instead.
            if href not in url_list:
                url_list.append(href)
        if len(titles) < 10:  # last (partial) page reached
            break
        page_index += 10
    return url_list


def GetKeyWordlist(keywordlist):  # Split the input string into a keyword list
    """Split a '|'-delimited keyword string into a list of trimmed keywords.

    Empty and whitespace-only segments are dropped.
    """
    trimmed = (piece.strip() for piece in keywordlist.split('|'))
    return [token for token in trimmed if token]


def GetArtiList_Json(url):
    """Download and parse the news article at `url` using newspaper3k.

    Args:
        url: the article URL to fetch.

    Returns:
        A JSON string of the article fields; every empty/falsy field is
        replaced by the placeholder string "无数据" ("no data").
    """
    news = Article(url, language='zh', keep_article_html=True)
    news.download()  # fetch the page first
    news.parse()     # then extract the article fields

    def _or_placeholder(value):
        # Fix: the original used the deprecated `x and x or y` idiom, which
        # is just `x or y` — use the direct form.
        return value or "无数据"

    data = {
        "id": 1,
        "url": _or_placeholder(news.url),
        "title": _or_placeholder(news.title),
        "text": _or_placeholder(news.text),  # article body text
        "authors": _or_placeholder(news.authors),
        "top_image": _or_placeholder(news.top_image),
        "movies": _or_placeholder(news.movies),
        "summary": _or_placeholder(news.summary),
        "article_html": _or_placeholder(news.article_html),
    }
    return json.dumps(data, ensure_ascii=False, indent=4)


if __name__ == '__main__':
    # Fix: app.run expects an integer port; the original passed the string
    # '8080', which raises in Werkzeug's server setup.
    app.run(host='127.0.0.1', port=8080, debug=True)
