from flask import request

from app.handle.analysis import create_word_cloud
from config import LOCALHOST
from spider import storage_article_text
from . import web
from app.db.db import db_query_diaries, db_query_articles, db_login_user, db_query_keywords, db_query_article, \
    db_query_many_keywords


# 用户登录
# User login
@web.route('/user/login', methods=['POST'])
def login():
    """Authenticate a user from POST form fields and return the result wrapped in {'data': ...}."""
    # Missing form fields raise a 400 via Flask's request.form[...] access.
    phone = request.form['phonenum']
    password = request.form['passwd']
    return {'data': db_login_user(phone, password)}


# 查询用户所有日记（用户专有）
# Query all diaries of the user (user-specific)
@web.route('/user/diary/query')
def query_diaries():
    """Return every diary record wrapped in {'data': ...}."""
    return {'data': db_query_diaries()}


# 查询所有文章（不是用户专有）
# Query all articles (not user-specific)
@web.route('/article/query/<int:page>/<int:hot>')
def query_articles(page, hot):
    """Return a page of articles; `hot` presumably toggles hot-sorting — verify in db layer."""
    articles = db_query_articles(page, hot)
    return {'data': articles}


# 根据文章id查询一篇文章内容
# Query the content of a single article by its id
@web.route('/article/query/single/<int:aid>')
def query_article(aid):
    """Return the article with id `aid` wrapped in {'data': ...}.

    Fix: removed a leftover debug print of the response payload.
    """
    res = db_query_article(aid)
    return {'data': res}


# 查询所有文章的关键字（需要去重）
# Query keywords of all articles (deduplicated)
@web.route('/article/keyword/query')
def query_keyword():
    """Return article keywords wrapped in {'data': ...}."""
    # NOTE(review): 5 is presumably a keyword count/limit — confirm against db_query_keywords.
    keyword_limit = 5
    return {'data': db_query_keywords(keyword_limit)}


# 爬取博客到数据库中
# Crawl a blog into the database
@web.route('/article/fetch/burl', methods=['POST'])
def fetch_articles():
    """Scrape articles from the blog URL given in the 'burl' form field and store them.

    Returns a fixed success message (Chinese: "crawled successfully").
    Fix: removed a leftover debug print of the submitted URL.
    """
    spider_url = request.form['burl']
    storage_article_text(spider_url)
    return '成功爬取'


@web.route('/article/analysis/<int:flag>')
def get_analysis(flag):
    """Return a word-cloud image URL.

    A truthy `flag` regenerates the word cloud; otherwise the previously
    generated static image URL is returned.
    """
    if not flag:
        # Reuse the existing word cloud image.
        return f'http://{LOCALHOST}:5002/static/images/analysis/wct.jpg'
    # Regenerate the word cloud and return its URL.
    return create_word_cloud()


@web.route('/article/keyword/many')
def get_keyword_many():
    """Return the larger keyword set wrapped in {'data': ...}."""
    return {'data': db_query_many_keywords()}
