from flask import Blueprint, render_template, request, jsonify, abort, Response
from flask_login import login_required

from Config import db
from models.my_model import Post, R, User
from datetime import datetime, timedelta

from wordcloud import WordCloud
from wordcloud import get_single_color_func
import numpy as np
import jieba
import paddle
import collections
from PIL import Image

page_data = Blueprint("data", __name__)


@page_data.route("/test")
def test_page():
    '''
    Debug endpoint: exercise the hot-word helper with a few sample sizes
    (the first call mirrors the default n=1 invocation).

    :return: plain "ok" once all calls completed
    '''
    for sample_size in (1, 1, 200):
        get_today_n_hot_title_split(sample_size)
    return "ok"


@page_data.route("/data")
@login_required
def data():
    '''
    Main statistics view.

    :return: rendered site_data.html with the last-30-day post/user counts
             and the last-7-day top-5 hot words
    '''
    context = {
        "recent_30_post": __recent_30_post(),              # posts over the last 30 days
        "recent_30_user": __recent_30_user(),              # users over the last 30 days
        "recent_7_5_hot_title": __recent_n_5_hot_title(7)  # 5 hot words over the last 7 days
    }
    return render_template("site_data.html", **context)


@page_data.route("/_refresh_and_get_wordcloud")
@page_data.route("/_refresh_and_get_wordcloud/<usemask>")
def refresh_and_get_wordcloud(usemask=None):
    '''
    Regenerate the word-cloud image cache and return the fresh image.

    :param usemask: any non-empty URL segment DISABLES the PNG mask
                    (note the inverted `if not usemask` check below)
    :return: image response, or a JSON error body with HTTP 500
    '''
    # Load the mask shape; the context manager closes the image handle
    # (the original left it open).
    try:
        with Image.open("static/img/PSDN_mask.png") as mask_img:
            mask = np.array(mask_img)
    except Exception as e:
        print(e)
        # BUGFIX: the original returned jsonify(R(500, "异常")) without
        # .define(), unlike every other error path in this file.
        return jsonify(R(500, "异常").define()), 500

    text = __get_all_title_cut_str()
    stopwords = {"我", "你", "她", "的", "是", "了", "在", "也", "和", "就", "都", "这"}

    if not usemask:
        return _render_wordcloud(text, stopwords, mask, "static/img/WC.png")
    return _render_wordcloud(text, stopwords, None, "static/img/WC_without_mask.png")


def _render_wordcloud(text, stopwords, mask, out_path):
    '''
    Render a word cloud to *out_path* and return it as an image response.

    :param text: space-separated words to cloud
    :param stopwords: words excluded from the cloud
    :param mask: numpy array mask, or None for a plain rectangular cloud
    :param out_path: cache file the rendered PNG is written to
    :return: image response, or a JSON error body with HTTP 500
    '''
    wc = WordCloud(
        font_path="msyh.ttc",
        mask=mask,
        width=1000,
        height=700,
        background_color='white',
        max_words=600,
        stopwords=stopwords,
        color_func=get_single_color_func("#ff6a46")).generate(text)
    wc.to_file(out_path)
    try:
        # `with` guarantees the handle is closed even if read() fails.
        with open(out_path, 'rb') as f:
            png_bytes = f.read()
    except Exception as e:
        print(e)
        return jsonify(R(500, "异常").define()), 500
    # The cache file is a PNG; the original mislabelled it as image/jpeg.
    return Response(png_bytes, mimetype='image/png')


@page_data.route("/_get_wordcloud")
def get_wordcloud():
    '''
    Serve the cached word-cloud image (written by refresh_and_get_wordcloud).

    :return: image response, or a JSON error body with HTTP 500 when the
             cache file cannot be read
    '''
    try:
        # `with` guarantees the handle is closed even if read() fails
        # (the original used open/read/close and could leak the handle).
        with open("static/img/WC.png", 'rb') as f:
            png_bytes = f.read()
    except Exception as e:
        print(e)
        return jsonify(R(500, "异常").define()), 500
    # The cache file is a PNG; the original mislabelled it as image/jpeg.
    return Response(png_bytes, mimetype='image/png')


def get_today_n_hot_title_split(n: int = 1) -> "list | None":
    '''
    Top-n hot words from today's (last 24h) post titles, n defaults to 1.

    :param n: number of words to return; silently capped at the number of
              distinct words available
    :return: list of at most n words ordered by descending frequency, or
             None when there are no words at all
    '''
    words_count = collections.Counter(__get_all_title_cut_str(1).split(" "))
    # str.split can yield empty tokens; drop them instead of counting them
    # (pop with default replaces the original try/except KeyError dance).
    words_count.pop("", None)
    if not words_count:
        return None
    # Counter.most_common replaces the original manual sort + break loop
    # and already caps the result at n.
    return [word for word, _ in words_count.most_common(n)]


def __recent_30_post() -> list:
    '''
    Daily post counts for the last 30 days.

    :return: list of [date-string, count-string] pairs; Counter preserves
             insertion order, so the query's create_time ordering keeps the
             days chronological for chart rendering
    '''
    since = datetime.now() - timedelta(days=30)
    posts = Post.query.filter(Post.create_time >= since).order_by(Post.create_time).all()
    day_counts = collections.Counter(p.create_time.strftime("%Y-%m-%d") for p in posts)
    return [[str(day), str(count)] for day, count in day_counts.items()]


def __recent_30_user() -> list:
    '''
    Daily new-user counts for the last 30 days.

    :return: list of [date-string, count-string] pairs; Counter preserves
             insertion order, so the query's create_time ordering keeps the
             days chronological for chart rendering
    '''
    since = datetime.now() - timedelta(days=30)
    users = User.query.filter(User.create_time >= since).order_by(User.create_time).all()
    # Loop variable renamed from the misleading `post` in the original.
    day_counts = collections.Counter(u.create_time.strftime("%Y-%m-%d") for u in users)
    return [[str(day), str(count)] for day, count in day_counts.items()]


def __recent_n_5_hot_title(n: int = 7) -> "list | None":
    '''
    Top-5 hot words over the last n days, formatted for an ECharts pie.

    :param n: look-back window in days, default 7
    :return: list of {"name", "value"} slices plus one trailing invisible
             filler slice whose "value" is the total of the visible ones
    '''
    res: list = list()
    # Invisible filler slice for the chart; "value" accumulates the total
    # of the visible slices while they are built below.
    param: dict = {
        "value": 0,
        "itemStyle": {
            # stop the chart from rendering this piece
            'color': 'none',
            'decal': {
                'symbol': 'none'
            }
        },
        "label": {
            'show': False
        }
    }
    words_count = collections.Counter(__get_all_title_cut_str(n).split(" "))
    words_count.pop("", None)  # drop empty tokens produced by the split
    if len(words_count) < 5:
        print("帖子太少啦")
        # BUGFIX: the placeholder value was the string "100", which made
        # `param["value"] += temp_dic["value"]` raise TypeError (int + str).
        temp_dic: dict = {"name": "帖子太少，统计中...", "value": 100}
        param["value"] += temp_dic["value"]
        res.append(temp_dic)
    else:
        # most_common(5) replaces the original manual sort + break loop.
        for name, count in words_count.most_common(5):
            param["value"] += count
            res.append({"name": name, "value": count})
    res.append(param)
    return res


def __get_all_title_cut_str(days=-1) -> str:
    '''
    Space-joined jieba word segments of post titles.

    :param days: optional; restrict to posts from the last *days* days,
                 any value <= 0 means all posts
    :return: all segmented title words joined by single spaces
    '''
    # BUGFIX: the original referenced `paddle.enable_static` without
    # calling it, so static mode was never actually enabled.
    paddle.enable_static()
    jieba.enable_paddle()  # paddle mode; supported since jieba 0.40
    # The two original branches only differed in the query; share the
    # segmentation loop instead of duplicating it.
    if days > 0:
        since = datetime.now() - timedelta(days=days)
        all_posts = Post.query.filter(Post.create_time >= since).order_by(Post.create_time).all()
    else:
        all_posts = db.session.query(Post).all()
    words: list = []
    for post in all_posts:
        # cut_for_search yields words lazily; extend consumes the generator
        # directly instead of the original ",".join(...).split(",") round
        # trip, which would corrupt tokens containing commas.
        words.extend(jieba.cut_for_search(post.title))
    return " ".join(words)


@page_data.errorhandler(401)
def errorhandler_401(error):
    '''
    Blueprint-level 401 handler: render the generic template with the
    error object as the message, keeping the 401 status code.
    '''
    body = render_template("TEMPLATE.html", msg=error)
    return body, 401
