from django.shortcuts import render
import pandas as pd
import re
import base64
import io
from os import path
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import jieba.analyse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
# snownlp中文分词
from snownlp import SnowNLP
import copy

import os
from sklearn.cluster import KMeans
# Create your views here.

def getRootPath():
    """Return the project root directory (the parent of this app's
    directory), including a trailing path separator.

    Bug fixed: the original used ``curPath.strip("prehandle")``, which
    removes any of the CHARACTERS 'p','r','e','h','a','n','d','l' from both
    ends of the string -- not the literal suffix ``"prehandle"`` -- so it
    could corrupt the path whenever the project root's name begins or ends
    with one of those letters.
    """
    # Absolute directory that contains this file, e.g. <root>/prehandle
    curPath = os.path.abspath(os.path.dirname(__file__))
    print("curPath:", curPath)
    # Parent directory plus a trailing separator, matching the shape the
    # original produced ("<root>" + separator) that callers concatenate onto.
    rootPath = os.path.dirname(curPath) + os.sep
    print(rootPath)
    return rootPath

#print(getRootPath()+ "static\\" + r'dataset\d1.xlsx')


def read_data():
    """Read the worksheets d1.xlsx .. d14.xlsx under <root>/static/dataset,
    concatenate them into a single DataFrame, dump the raw combined data to
    Alldata_prehandle_before.xlsx, and return the DataFrame.

    Side effects: sets the module-level globals ``resource_url`` (path of
    the static directory, reused by several sibling functions) and ``data``.
    """
    global resource_url
    resource_url = getRootPath() + "static\\"

    global data
    # Read every sheet first and concatenate once -- calling pd.concat
    # inside the loop (as the original did) is quadratic in total row count.
    frames = [pd.read_excel(resource_url + "dataset\\d" + str(i) + ".xlsx")
              for i in range(1, 15)]
    data = pd.concat(frames)

    # Persist the raw (pre-cleaning) text data.
    # NOTE(review): the ``options=`` keyword was removed from ExcelWriter in
    # modern pandas in favour of ``engine_kwargs`` -- confirm the pinned
    # pandas version before upgrading.
    with pd.ExcelWriter(resource_url + 'dataset\\Alldata_prehandle_before.xlsx',
                        engine='xlsxwriter',
                        options={'strings_to_urls': False}) as writer:
        data.to_excel(writer, index=False)
    return data

def del_duplicate(data):
    """De-duplicate the DataFrame in place, keeping the last occurrence
    of each duplicated row.

    Returns the same (mutated) DataFrame so the call can be chained.
    """
    data.drop_duplicates(keep='last', inplace=True)
    return data

def del_stopwords(divided_word_list):
    """Remove stop words from a list of segmented tokens.

    Loads <root>/static/dataset/stopwords.txt into the module-level global
    ``stopwords`` (a set, reused later by decrease_dimension), then returns
    a new list containing only the tokens that are not stop words.

    Bugs fixed: the original ran ``word.replace(stopword, "")`` for every
    stop word, which deletes stop-word SUBSTRINGS inside legitimate tokens
    and keeps the resulting empty strings in the output; it also never
    closed the stop-word file.  Stop-word removal should drop whole tokens.
    """
    global stopwords
    stopwords = set()
    # ``with`` guarantees the file handle is released.
    with open(resource_url + "dataset\\stopwords.txt", "r", encoding="UTF-8") as f:
        for line in f:
            stopwords.add(line.strip())
    print("stopwords:", stopwords)

    # Keep only non-empty tokens that are not stop words.
    return [word for word in divided_word_list
            if word and word not in stopwords]

def del_special_char(des_string, re_string=''):
    """Replace every character that is not a CJK ideograph, an ASCII letter,
    or a digit with ``re_string`` (default: remove it).

    Bug fixed: the original class ``[^\\u4e00-\\u9fa5^a-z^A-Z^0-9]``
    contained literal '^' characters inside the negated set, so caret
    characters wrongly survived the filter.
    """
    res = re.compile(r"[^\u4e00-\u9fa5a-zA-Z0-9]")
    return res.sub(re_string, des_string)

def divide_sentence(sentense):
    """Segment a Chinese sentence into a list of words using SnowNLP."""
    return SnowNLP(sentense).words

#print(divide_sentence("我去，这个世界为啥这么疯狂啊"))

def prehandle(request):
    """Django view: run the full preprocessing pipeline and render prehandle.html.

    Pipeline: read all excel sheets -> de-duplicate -> strip special
    characters -> word segmentation -> stop-word removal -> KMeans
    clustering -> word cloud.  Relies on module-level globals set as side
    effects along the way (resource_url, data, stopwords, msg_list,
    divide_words, msg_dataframe).
    """
    data = read_data()  # read raw data (also sets globals resource_url / data)
    data = del_duplicate(data)  # drop duplicate rows
    print(data.head())


    msg_data = data['msg_content']
    print(msg_data)
    global msg_list
    msg_list = []
    for i in range(len(msg_data)):  # strip special characters from each message
        msg_list.append(msg_data.iloc[i])
        msg_list[i] = del_special_char(msg_list[i])
    global divide_words
    divide_words = []
    for msg in msg_list:  # word segmentation
        divide_words.extend(divide_sentence(msg))
    # stop-word removal
    divide_words = del_stopwords(divide_words)

    divide_words_dataframe = pd.DataFrame(divide_words)
    # Segmented-word data: divide_words
    # NOTE(review): this file is immediately overwritten by the next writer
    # below (identical filename) -- one of the two writes is almost
    # certainly meant to target a different file; confirm the intent.
    with pd.ExcelWriter(resource_url+'dataset\\data_befort_prehandle.xlsx', engine='xlsxwriter',
                        options={'strings_to_urls': False}) as writer:
        divide_words_dataframe.to_excel(writer, index=False) # segmented words (duplicates already removed)

    with pd.ExcelWriter(resource_url+'dataset\\data_befort_prehandle.xlsx', engine='xlsxwriter',
                        options={'strings_to_urls': False}) as writer:
        data.to_excel(writer, index=False) # raw data before segmentation (duplicates removed)

    global msg_dataframe
    # Messages after de-dup and special-char removal, before segmentation.
    msg_dataframe = pd.DataFrame(msg_list,columns=["msg_content"])
    # NOTE(review): the extra leading '\\' here produces a doubled
    # backslash ("static\\\\dataset"); Windows tolerates it but it looks
    # unintended -- confirm.
    with pd.ExcelWriter(resource_url+'\\dataset\\data_after_prehandle.xlsx', engine='xlsxwriter',
                        options={'strings_to_urls': False}) as writer:
        msg_dataframe.to_excel(writer, index=False)
    args = {
        "data":data, # data before preprocessing
        "msg_list":msg_list, # data after preprocessing
        "divide_words":divide_words, # segmented words
        "cluster_res_data": "",
        "cluster_res_labels":"",
        "plot_url": ""
    }
    cluster_res_data= Kmeans_cluster()
    cluster_res_labels = cluster_res_data['labels']
    cluster_res_data = cluster_res_data['msg_content']

    args["cluster_res_data"] = cluster_res_data
    args["cluster_res_labels"] = cluster_res_labels
    # NOTE(review): at review time generate_word_cloud was commented out at
    # module scope in this file; verify it is defined, otherwise this call
    # raises NameError at request time.
    args["plot_url"] = generate_word_cloud()

    # args.update(Kmeans_cluster(request))
    # args.update(generate_word_cloud(request))

    print("预处理结束")
    print(args["plot_url"])
    return render(request, 'prehandle.html', args)
def cluster_res(request):
    """Django view: render the static LDA clustering result page."""
    template_name = 'lda_result_4.html'
    return render(request, template_name)

################################################################################################################
def decrease_dimension():
    """Vectorize the message texts and project them onto two PCA components.

    Reads the module-level globals ``data`` (raw messages), ``stopwords``
    and the ``divide_sentence`` tokenizer; stores the projection in the
    global ``newvector_frame`` (DataFrame with columns pca_1 / pca_2).
    """
    global newvector_frame
    vectorizer = CountVectorizer(tokenizer=divide_sentence, stop_words=list(stopwords))
    bow_matrix = vectorizer.fit_transform(data['msg_content']).toarray()
    # Reduce the bag-of-words space to two principal components.
    projected = PCA(n_components=2).fit_transform(bow_matrix)
    newvector_frame = pd.DataFrame(projected, columns=['pca_1', 'pca_2'])


def Kmeans_cluster(n_clusters=2):
    """Cluster the PCA-reduced messages with KMeans.

    Parameters
    ----------
    n_clusters : int, optional
        Number of clusters.  Defaults to 2, matching the previously
        hard-coded K, so existing callers are unaffected.

    Returns the global DataFrame ``cluster_res_data`` with columns
    msg_content / labels.  Calls decrease_dimension() first, so the globals
    it depends on (data, stopwords, msg_dataframe) must already be set.
    """
    decrease_dimension()
    kms = KMeans(n_clusters=n_clusters)
    kms.fit(newvector_frame)
    # Copy the fitted labels into a one-column DataFrame.
    labels_frame = pd.DataFrame(copy.copy(kms.labels_), columns=["labels"])

    # Align indices before the column-wise concat; otherwise pandas would
    # match on the original (possibly gappy) indices.
    labels_frame.reset_index(drop=True, inplace=True)
    msg_dataframe.reset_index(drop=True, inplace=True)
    global cluster_res_data
    cluster_res_data = pd.concat([msg_dataframe, labels_frame], axis=1)
    print("聚类结束")
    return cluster_res_data
#
def generate_word_cloud():
    """Build a word cloud for cluster 0 and return it as a base64 PNG string.

    Reads the module-level globals ``cluster_res_data`` (set by
    Kmeans_cluster) and ``resource_url``.

    Bug fixed: this function was commented out in the original file even
    though prehandle() calls it, which made the view fail with NameError at
    request time -- it is restored here.  The draft's plt.imshow()/plt.show()
    calls are dropped: they open a blocking GUI window inside the request
    thread, and the returned value only needs the in-memory image.
    """
    # Take the messages assigned to cluster 0 as the example cluster.
    cluster0 = cluster_res_data[cluster_res_data['labels'] == 0]
    cluster0_content = ' '.join(cluster0['msg_content'])

    def del_punctuation(text):
        # Strip common Chinese/ASCII punctuation before keyword extraction.
        punctuation = ['↓', '：', '；', '~', '/', '\\', '#', '$', '&', '-', '+', ',', '。', '.', '|', '，', '?', '？', '!',
                       '！', '“', '”', '"', '·', ' ', '【', '】', '{', '}', '*', '(', ')', '（', '）']
        for ch in punctuation:
            text = text.replace(ch, "")
        return text

    cleaned_content = del_punctuation(cluster0_content)

    # Extract the topic keywords of cluster 0 via TextRank.
    kw1 = jieba.analyse.textrank(cleaned_content, topK=20, withWeight=True,
                                 allowPOS=('ns', 'n', 'vn', 'v'))
    words_frequence = {x[0]: x[1] for x in kw1}
    # A CJK-capable font_path is required for Chinese text; otherwise the
    # glyphs render as empty boxes.
    backgroud_Image = plt.imread(resource_url + 'images\\cat.jpg')
    wordcloud = WordCloud(font_path=resource_url + 'dataset\\YuGothL.ttc',
                          mask=backgroud_Image, repeat=True,
                          background_color='white')
    wordcloud = wordcloud.fit_words(words_frequence)

    # Encode the rendered image as a base64 PNG for inlining in the template.
    buffer = io.BytesIO()
    wordcloud.to_image().save(buffer, 'png')
    plot_url = base64.b64encode(buffer.getvalue()).decode()

    print("图片生成结束")
    return plot_url

def getWordCloud(request):
    """Django view: render the static word-cloud result page."""
    template_name = "wordCloud_result.html"
    return render(request, template_name)


def get_emotion_analysis(request):
    """Django view: render the sentiment-analysis charts page.

    NOTE(review): renders the same template as get_charts -- confirm this
    is intentional.
    """
    template_name = "charts.html"
    return render(request, template_name)

def get_charts(request):
    """Django view: render the charts page."""
    template_name = "charts.html"
    return render(request, template_name)

def get_result(request):
    """Django view: render the miscellaneous results page."""
    template_name = 'someresult.html'
    return render(request, template_name)