from flask import request,Blueprint,jsonify
import requests
from bs4 import BeautifulSoup
import pymysql as pymysql

from analyze.analyze import createIndex
from hotword.hotword import hotwordCreate
from pojo.Content import Content
import analyze
import re

# Blueprint exposing the weibo hot-search spider endpoints (registered by the app factory).
xlSpider = Blueprint('xlSpider', __name__)
# Weibo realtime hot-search summary page that /createSpider crawls.
url='https://s.weibo.com/top/summary?Refer=top_hot&topnav=1&wvr=6'
def getHTMLText(url, timeout=30):
    """Fetch *url* and return the response body as text.

    Raises requests.HTTPError on a non-2xx status. The detected
    (apparent) encoding is applied before decoding the body.
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    # Trust the content-sniffed encoding over the header-declared one.
    response.encoding = response.apparent_encoding
    return response.text

@xlSpider.route("/createSpider")
def createSpider():
    """Crawl the weibo hot-search summary page and persist each entry.

    Each <tr class=""> row yields one Content with url, title text and
    heat score, extracted by regex from the row's HTML. Results are
    written to MySQL via saveToDB().
    """
    html = getHTMLText(url)
    soup = BeautifulSoup(html, 'html.parser')
    contents = []
    for item in soup.find_all('tr', class_=""):
        row_html = str(item)
        content = Content()
        # Link + title live in: <a href="..." target="_blank">title</a>
        links = re.findall(r'<a href="(.*?)" target="_blank">(.*)</a>', row_html)
        if links:
            # The pattern has exactly two groups, so each match is a 2-tuple.
            content.url = links[0][0]
            content.content = links[0][1]
        # Heat score lives in the row's <span>.
        scores = re.findall(r'<span>(.*)</span>', row_html)
        if scores:
            content.score = scores[0]
        contents.append(content)
    saveToDB(contents)
    # Optional post-processing, intentionally disabled (use /createIndexDoc instead):
    # createIndex(contents)
    # hotwordCreate(contents)
    return "爬取创建成功"

@xlSpider.route("/clearData")
def clearData():
    """Empty the xlwb_data table (TRUNCATE) and report success.

    Errors are logged and rolled back rather than propagated, so the
    route always answers with the success string — same best-effort
    contract as the original.
    """
    conn = getConn()
    conn.autocommit(1)
    cursor = conn.cursor()
    try:
        cursor.execute("truncate xlwb_data")
    except Exception:
        import traceback
        traceback.print_exc()
        # Roll back on failure (TRUNCATE is DDL so this is mostly a
        # no-op, kept for symmetry with the other routes).
        conn.rollback()
    finally:
        # Always release cursor and connection.
        cursor.close()
        conn.close()
    return "清空数据成功"



@xlSpider.route("/createIndexDoc")
def createIndexDoc():
    """Rebuild the search index and hot-word stats from rows stored in MySQL.

    Loads every distinct row from xlwb_data into Content objects, then
    feeds them to createIndex() and hotwordCreate(). Errors are logged
    and rolled back; the route still returns the success string.
    """
    conn = getConn()
    conn.autocommit(1)
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT distinct * FROM xlwb_data")
        contents = []
        for row in cursor.fetchall():
            # Row layout: (id, content, url, score) — matches the
            # INSERT column order in saveToDB().
            content = Content()
            content.content = row[1]
            content.url = row[2]
            content.score = row[3]
            contents.append(content)
        createIndex(contents)
        hotwordCreate(contents)
    except Exception:
        import traceback
        traceback.print_exc()
        # Roll back on failure.
        conn.rollback()
    finally:
        # Always release cursor and connection.
        cursor.close()
        conn.close()
    return "成功"



@xlSpider.route("/getDataFromDB")
def getDataFromDB():
    """Paged keyword search over xlwb_data, returned as a table JSON payload.

    Query args: keyword (substring match on content), page (1-based),
    limit (page size). Responds with {"code", "msg", "data", "count"}.
    """
    keyword = request.args.get("keyword", "")
    page = int(request.args.get("page"))
    limit = int(request.args.get("limit"))
    start = (page - 1) * limit
    conn = getConn()
    conn.autocommit(1)
    cursor = conn.cursor()
    try:
        # keyword comes straight from the request: use parameterized
        # queries, never string-format user input into SQL.
        pattern = '%{}%'.format(keyword)
        cursor.execute(
            "SELECT count(id) FROM xlwb_data WHERE content LIKE %s",
            (pattern,))
        # fetchone() returns a 1-tuple; unwrap the scalar so the JSON
        # "count" field is a number, not a list.
        count = cursor.fetchone()[0]
        cursor.execute(
            "SELECT distinct * FROM xlwb_data WHERE content LIKE %s "
            "LIMIT %s, %s",
            (pattern, start, limit))
        rows = [
            {"id": r[0], "content": r[1], "url": r[2], "score": r[3]}
            for r in cursor.fetchall()
        ]
        return jsonify({
            "code": 0,
            "msg": "",
            "data": rows,
            "count": count
        })
    except Exception:
        import traceback
        traceback.print_exc()
        # Roll back on failure.
        conn.rollback()
        # The original returned None here, which Flask rejects with a
        # TypeError; report the failure explicitly instead.
        return jsonify({"code": 1, "msg": "查询失败", "data": [], "count": 0})
    finally:
        # Always release cursor and connection.
        cursor.close()
        conn.close()
def getConn():
    """Open a new connection to the local xlwb_spider MySQL database."""
    # NOTE(review): credentials are hard-coded — consider loading them
    # from configuration instead of source.
    params = {
        'host': '127.0.0.1',
        'port': 3306,
        'user': 'root',
        'passwd': '123456',
        'db': 'xlwb_spider',
        'charset': 'utf8',
    }
    return pymysql.connect(**params)
def saveToDB(contents):
    """Insert crawled Content objects into xlwb_data, skipping empty titles.

    :param contents: iterable of Content with .content, .score and a
        getAbsoloteURL() accessor.
    Errors are logged and rolled back rather than propagated.
    """
    conn = getConn()
    conn.autocommit(1)
    cursor = conn.cursor()
    try:
        # Parameterized execute lets the driver escape values safely
        # (pymysql.escape_string is deprecated/removed in newer pymysql).
        sql = 'INSERT INTO xlwb_data(content,url,score) VALUES (%s,%s,%s)'
        for item in contents:
            # Rows without a parsed title carry no useful data.
            if item.content == "":
                continue
            cursor.execute(sql, (item.content, item.getAbsoloteURL(), item.score))
    except Exception:
        import traceback
        traceback.print_exc()
        # Roll back on failure.
        conn.rollback()
    finally:
        # Always release cursor and connection.
        cursor.close()
        conn.close()