from django.shortcuts import render
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from snownlp import SnowNLP
from io import BytesIO
import base64
import numpy as np
# import csv
import cx_Oracle

# Page 0: landing page
def initialPage(request):
    """Render the initial landing page with an empty context."""
    return render(request, 'initialPage.html', {})

# Home page: navigation links to the three analysis sub-pages and the admin site
def houseCommentHome(request):
    """Render the home page with the URLs of the analysis sub-pages."""
    base = "http://localhost:8000"
    nav_urls = {
        'user_comment_url': base + "/house_comment_app/houseUserComment/",
        'media_news_url': base + "/house_comment_app/houseMediaNews/",
        'dynamic_tagging_url': base + "/house_comment_app/dynamicTag/",
        'admin_url': base + "/admin/",
    }
    return render(request, 'houseCommentHome.html', nav_urls)

# Helper used by the keyword-extraction / cluster visualisation pages.
# Draws a word cloud from a keyword DataFrame (e.g. tr_comment_keyword rows).
def word_cloud(df):
    """Render a word cloud from a two-column DataFrame and return a data URI.

    df: DataFrame whose first column is the word and second column its weight
        (as built by the callers in this module).
    Returns a 'data:image/png;base64,...' string ready for an <img src=...>.
    """
    keyword_rows = df.values.tolist()
    words_frequence = {row[0]: row[1] for row in keyword_rows}
    # House-shaped mask image for the cloud layout.
    backgroud_Image = plt.imread('static/image/house.jpg')
    wordcloud = WordCloud(font_path='simsun.ttc', mask=backgroud_Image, repeat=True,
                          max_words=50, background_color='white')
    wordcloud = wordcloud.fit_words(words_frequence)
    fig = plt.figure()
    plt.axis('off')
    plt.imshow(wordcloud)
    buffer = BytesIO()
    # Force PNG so the bytes match the data-URI MIME type declared below.
    plt.savefig(buffer, format='png')
    # Close the figure explicitly: this runs per request, and un-closed pyplot
    # figures accumulate in process memory.
    plt.close(fig)
    imb = base64.b64encode(buffer.getvalue())
    return 'data:image/png;base64,' + imb.decode()


#读取pca聚类结果图进行聚类结果显示,参数类型是DataFrame
def cluster(X_pca_frame):
    # 聚类绘图
    cluster_1_color = {0: 'red', 1: 'green', 2: 'blue', 3: 'yellow', 4: 'cyan', 5: 'black', 6: 'magenta', 7: '#fff0f5',
                       8: '#ffdab9', 9: '#ffa500'}
    colors_clustered_data = X_pca_frame['cluster'].map(cluster_1_color)
    plt.figure()
    plt.scatter(X_pca_frame['pca_1'].values, X_pca_frame['pca_2'].values,
                c=colors_clustered_data, marker='.')
    buffer = BytesIO()
    plt.savefig(buffer)
    buffer.seek(0)
    pltData = buffer.getvalue()
    imb = base64.b64encode(pltData)
    ims = imb.decode()
    imd = 'data:image/png;base64,' + ims
    return imd

def SentimentAnalysis(df):
    """Plot a histogram of sentiment scores and return it as a PNG data URI.

    df: DataFrame with a 'sentiment_score' column; values are binned over
        [0, 1) in 0.01 steps, so scores are assumed to be probabilities
        in that range (as produced by the sentiment result tables).
    """
    sentimentlist = df['sentiment_score'].values.tolist()
    fig = plt.figure()
    plt.hist(sentimentlist, bins=np.arange(0, 1, 0.01), facecolor='b')
    plt.xlabel('Probability')
    plt.ylabel('Quantity')
    plt.title('Sentiment Analysis')
    buffer = BytesIO()
    # Force PNG so the bytes match the data-URI MIME type declared below.
    plt.savefig(buffer, format='png')
    # Close the figure explicitly to avoid per-request figure accumulation.
    plt.close(fig)
    imb = base64.b64encode(buffer.getvalue())
    return 'data:image/png;base64,' + imb.decode()

# Sub-page 1: user comments
def houseUserComment(request):
    """Build the user-comment analysis page.

    Loads keyword, PCA-cluster and sentiment result tables from Oracle and
    renders them as word clouds, a cluster scatter plot and sentiment
    histograms via the helpers above.
    """
    # NOTE(review): credentials are hard-coded; move them to settings or an
    # environment variable before deploying.
    conn = cx_Oracle.connect('yx/233@localhost:1521')  # connect to Oracle
    try:
        cur = conn.cursor()
        try:
            def query_df(sql, row_to_dict):
                # Run *sql* and build a DataFrame, one dict per fetched row.
                cur.execute(sql)
                return pd.DataFrame([row_to_dict(res) for res in cur.fetchall()])

            # Row mappers for the three table shapes used below.
            keyword_row = lambda res: {'word': str(res[1]), 'score': res[2]}
            cluster_row = lambda res: {'pca_1': res[1], 'pca_2': res[2], 'cluster': res[3]}
            sentiment_row = lambda res: {'word': str(res[0]), 'sentiment_score': res[1]}

            total_df = query_df('select * from tr_comment_keyword', keyword_row)
            cluster0_cloud = query_df('select * from tr_commentcluster0_keyword', keyword_row)
            cluster1_cloud = query_df('select * from tr_commentcluster1_keyword', keyword_row)
            cluster_pic = query_df('select * from pca_comment_cluster_result', cluster_row)
            sentiment = query_df('select * from comment_sentiment_results', sentiment_row)
            adj_sentiment = query_df('select * from comment_adj_sentiment_results', sentiment_row)
        finally:
            cur.close()
    finally:
        # Always release the connection, even if a query or render step fails.
        conn.close()

    context = {
        'total_wordcloud': word_cloud(total_df),
        'cluster_pic': cluster(cluster_pic),
        'cluster0_cloud': word_cloud(cluster0_cloud),
        'cluster1_cloud': word_cloud(cluster1_cloud),
        'sentiment_analysis': SentimentAnalysis(sentiment),
        'adj_sentiment_analysis': SentimentAnalysis(adj_sentiment)
    }
    return render(request, 'houseUserComment.html', context)


# Sub-page 2: media news
def houseMediaNews(request):
    """Build the media-news analysis page.

    Same pipeline as houseUserComment, but reads the news_* result tables.
    """
    # NOTE(review): credentials are hard-coded; move them to settings or an
    # environment variable before deploying.
    conn = cx_Oracle.connect('yx/233@localhost:1521')  # connect to Oracle
    try:
        cur = conn.cursor()
        try:
            def query_df(sql, row_to_dict):
                # Run *sql* and build a DataFrame, one dict per fetched row.
                cur.execute(sql)
                return pd.DataFrame([row_to_dict(res) for res in cur.fetchall()])

            # Row mappers for the three table shapes used below.
            keyword_row = lambda res: {'word': str(res[1]), 'score': res[2]}
            cluster_row = lambda res: {'pca_1': res[1], 'pca_2': res[2], 'cluster': res[3]}
            sentiment_row = lambda res: {'word': str(res[0]), 'sentiment_score': res[1]}

            total_df = query_df('select * from tr_news_keyword', keyword_row)
            cluster0_cloud = query_df('select * from tr_newscluster0_keyword', keyword_row)
            cluster1_cloud = query_df('select * from tr_newscluster1_keyword', keyword_row)
            cluster_pic = query_df('select * from pca_news_cluster_result', cluster_row)
            sentiment = query_df('select * from news_sentiment_results', sentiment_row)
            adj_sentiment = query_df('select * from news_adj_sentiment_results', sentiment_row)
        finally:
            cur.close()
    finally:
        # Always release the connection, even if a query or render step fails.
        conn.close()

    context = {
        'total_wordcloud': word_cloud(total_df),
        'cluster_pic': cluster(cluster_pic),
        'cluster0_cloud': word_cloud(cluster0_cloud),
        'cluster1_cloud': word_cloud(cluster1_cloud),
        'sentiment_analysis': SentimentAnalysis(sentiment),
        'adj_sentiment_analysis': SentimentAnalysis(adj_sentiment)
    }
    return render(request, 'houseMediaNews.html', context)


# Sub-page 3: dynamic part-of-speech tagging
# NOTE(review): mid-file imports — PEP 8 prefers these at the top of the module.
import jieba
import jieba.analyse
import jieba.posseg as psg
import re
from jinja2 import Environment, FileSystemLoader
from pyecharts.globals import CurrentConfig
from django.conf import settings

# Point pyecharts' Jinja environment at this project's templates directory so
# chart output rendered by dynamicTag() lands under templates/ (where
# jump_charts() serves it from).
CurrentConfig.GLOBAL_ENV = Environment(loader=FileSystemLoader("{}/templates".format(settings.BASE_DIR)))


def dynamicTag(request):
    """Dynamic part-of-speech tagging view (sub-page 3).

    GET: render the empty input form.
    POST: segment the submitted text with jieba, tag every token's part of
    speech, count noun co-occurrence per sentence, and render a relationship
    graph plus a word cloud into templates/fc_show.html (served by
    jump_charts). The token/POS list is returned to the template as "lst".
    """
    names = {}           # noun -> total occurrence count
    relationships = {}   # noun -> {co-occurring noun -> sentence co-occurrence count}
    # textNames records the nouns seen in each sentence; only nouns appearing
    # in the SAME sentence are considered related.
    textNames = []
    part_of_speech = []  # [word, POS flag] pairs for every token

    if request.method == "GET":
        return render(request, 'dynamicTag.html', {'userInput': ''})

    # Default to "" so a missing form field doesn't crash on .strip() below.
    userInput = request.POST.get("userInput", "")

    # Split into sentence-level chunks on Chinese/Western sentence punctuation.
    sentences = re.split(r"[!！。；;]", userInput.strip())

    for lines in sentences:
        lineNames = []
        lines = lines.strip()
        # BUG FIX: the original called str.replace('[^\w]+', ''), which treats
        # the regex as a literal string and is effectively a no-op; use re.sub
        # to actually strip non-word characters as intended.
        lines = re.sub(r'[^\w]+', '', lines)

        for w in psg.cut(lines):
            # Named-entity style pass: record every (word, POS flag) pair.
            part_of_speech.append([w.word, w.flag])

            # Keep only general nouns (n), place names (ns) and person names (nr).
            if w.flag not in ("n", "ns", "nr"):
                continue
            lineNames.append(w.word)  # add this noun to the current sentence's context
            if names.get(w.word) is None:
                names[w.word] = 0
                relationships[w.word] = {}
            names[w.word] += 1
        textNames.append(lineNames)

    # Relationship extraction: two distinct nouns are related iff they appear
    # in the same sentence; the edge weight counts such co-occurrences.
    for line in textNames:
        for name1 in line:
            for name2 in line:
                if name1 == name2:
                    continue
                relationships[name1][name2] = relationships[name1].get(name2, 0) + 1

    # Build the relationship graph (pyecharts). Imported lazily, matching the
    # original module's style of importing chart classes inside this view.
    from pyecharts import options as opts
    from pyecharts.charts import Graph, Page

    linestyle_opts = opts.LineStyleOpts(is_show=True,
                                        width=1,
                                        opacity=0.6,
                                        curve=0.3,
                                        type_="solid",
                                        color="gray")
    nodes = []
    links = []
    for name0, edges in relationships.items():
        # Node names must be unique; size grows with the number of neighbours.
        nodes.append(opts.GraphNode(name=name0,
                                    symbol_size=10 + len(edges), symbol='circle'))
        for target, weight in edges.items():
            links.append({"source": name0, "target": target,
                          "lineStyle": linestyle_opts, "value": weight})

    g = (
        Graph(init_opts=opts.InitOpts(width="1000px", height="600px"))
            .add(
            "",
            nodes=nodes,
            links=links,
            is_rotate_label=True,
            linestyle_opts=opts.LineStyleOpts(color="source", curve=0.3),
            label_opts=opts.LabelOpts(position="right"),
            edge_label=opts.LabelOpts(is_show=True, position='middle', formatter=' 关系{c}')
        )
            .set_global_opts(
            title_opts=opts.TitleOpts(title="和弦关系图", title_textstyle_opts=opts.TextStyleOpts(color="midnightblue")),
            legend_opts=opts.LegendOpts(orient="vertical", pos_left="2%", pos_top="20%"),
        )
    )

    # Word cloud of noun frequencies. Note: this pyecharts WordCloud shadows
    # the wordcloud-package WordCloud only inside this function.
    from pyecharts.charts import WordCloud

    namelist = list(zip(names.keys(), names.values()))
    w = (WordCloud()
         .add("", data_pair=namelist, word_size_range=[20, 100],
              textstyle_opts=opts.TextStyleOpts(font_family='Microsoft YaHei', font_weight='bold'))
         .set_global_opts(title_opts=opts.TitleOpts(title="词云图",
                                                    title_textstyle_opts=opts.TextStyleOpts(
                                                        color="midnightblue")))
         )

    page_1 = Page(layout=Page.SimplePageLayout)
    page_1.add(w)
    page_1.add(g)

    # Persist the combined charts; jump_charts() serves this file.
    page_1.render("{}/templates/fc_show.html".format(settings.BASE_DIR))

    return render(request, 'dynamicTag.html', context={"lst": part_of_speech, "tag": "ok"})


def jump_charts(request):
    """Serve the pre-rendered relationship-graph / word-cloud page."""
    return render(request, 'fc_show.html', {})
