"""
加载数据 数据预处理  jieba分词
"""
import gensim.models
import pandas as pd
import numpy as np
import jieba as jb
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,accuracy_score
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.svm import SVC


"""
def dataLoadToList(file_path):
    """Read the labelled spreadsheet and return (comment texts, comment tags).

    file_path: path to an .xlsx file with columns "comment_content"
    and "comment_tag".
    """
    sheet = pd.read_excel(file_path)
    return sheet["comment_content"], sheet["comment_tag"]


# 去停用词和标点符号
def participle(fileFrame, stop_path):
    """Tokenise every sentence with jieba and drop stop words / punctuation.

    fileFrame: iterable (e.g. pandas Series) of raw comment strings.
    stop_path: path to a UTF-8 stop-word file, one word per line.
    Returns a list of token lists, one per input sentence.

    Fixes: the stop-word file is now closed deterministically via a
    with-statement; lines are stripped instead of sliced with line[:-1],
    which chopped the final character of the last stop word whenever the
    file lacked a trailing newline; stop words live in a set so the
    membership test inside the token loop is O(1) instead of O(n).
    """
    with open(stop_path, 'r', encoding='UTF-8') as stop_file:
        stop_words = {line.strip() for line in stop_file}
    result_list = []
    for sentence in fileFrame:
        tokens = []
        for word in jb.lcut(sentence.strip()):
            word = word.strip()  # drop whitespace-only artefacts from the cut
            if word and word not in stop_words:
                tokens.append(word)
        result_list.append(tokens)
    return result_list


def word2Vec_train(good_doc, doc_tag):
    """Train a Doc2Vec model over tokenised sentences.

    good_doc: list of token lists (output of participle()).
    doc_tag: unused — Doc2Vec training is unsupervised and each sentence
             is tagged with its positional index; the parameter is kept
             so existing callers keep working.
    Returns the trained gensim Doc2Vec model.

    Fix: the manual count list + zip loop is replaced by enumerate, which
    produces the identical (index, document) pairing.
    """
    tagged_docs = [
        gensim.models.doc2vec.TaggedDocument(doc, tags=[i])
        for i, doc in enumerate(good_doc)
    ]
    model = gensim.models.Doc2Vec(tagged_docs, vector_size=50, window=3,
                                  min_count=1, workers=4)
    return model


filePath = "D:\\java\\NLP\\EmotionRecognition\\data\\TrainingData.xlsx"
stop_file_path = "D:\\java\\NLP\\EmotionRecognition\\data\\hit_stopwords.txt"

'''
result = participle(dataLoadToList(filePath)[0], stop_file_path)
resultM = word2Vec_train(result, dataLoadToList(filePath)[1])
resultM.save("zhu2.model")
'''

zhuModel = gensim.models.doc2vec.Doc2Vec.load("zhu2.model")
# print(zhuModel.docvecs.similarity(0, 4532))  # 两个句子的相似度
# print(zhuModel.docvecs.index_to_key)
# print(zhuModel.docvecs[1])  # 两种方法获取对应标签的文档向量
# print(zhuModel.docvecs.get_vector(1))
# words = u"这 蛋糕 真 他妈 好吃"
# words2 = u"这 蛋糕 太 好吃 了"
# a = zhuModel.infer_vector(words2.split())
# b = zhuModel.infer_vector(words.split())
# print(zhuModel.docvecs.most_similar(a))


# Vectorise test data with the trained model; returns a list of
# document vectors, one per tokenised sentence.

def vectorizationWithModel(model_path, data_list):
    """Embed each tokenised sentence using a saved Doc2Vec model.

    model_path: path of a model saved with Doc2Vec.save().
    data_list: list of token lists.
    Returns a list of inferred document vectors, one per sentence.
    """
    doc_model = gensim.models.doc2vec.Doc2Vec.load(model_path)
    return [doc_model.infer_vector(tokens) for tokens in data_list]

testFile_path = "D:\\java\\NLP\\EmotionRecognition\\data\\TestData.xlsx"
modelVec_path = "zhu2.model"
testFile_list = participle(dataLoadToList(testFile_path)[0], stop_file_path)
testFenLeiList = vectorizationWithModel(modelVec_path,testFile_list)

# Classify the document vectors with three approaches:
# Gaussian Naive Bayes, SVM, and K-means clustering.
# TrainClassificationModel(data, tags, algorithm name, test-split ratio)

def TrainClassificationModel(data, tag, module_name, proportion):
    """Train and evaluate a classifier on the document vectors.

    data: sequence of feature vectors (one per document).
    tag: sequence of ground-truth labels, aligned with data.
    module_name: one of "Naive Bayes", "Kmeans", "SVM".
    proportion: fraction of the data held out as the test split.
    Returns the fitted estimator, or None for an unknown module_name.

    Fixes: leftover debug prints (dataArray[1], X_train, y_train) and the
    dead commented-out SVC variants are removed; the unknown-name error
    message is made intelligible; the fitted model is now returned so
    callers can reuse it (previous callers that ignored the return value
    are unaffected).
    """
    dataArray = np.array(data)
    tagArray = np.array(tag)
    X_train, X_test, y_train, y_test = train_test_split(dataArray, tagArray,
                                                        test_size=proportion)
    if module_name == "Naive Bayes":
        gnb = GaussianNB()
        gnb.fit(X_train, y_train)
        print(classification_report(y_test, gnb.predict(X_test)))
        print("预测准确率为：", gnb.score(X_test, y_test))
        return gnb
    elif module_name == "Kmeans":
        km = KMeans(n_clusters=2)
        km.fit(X_train)
        result = km.predict(X_test)
        # NOTE(review): K-means cluster ids are arbitrary (0/1 may be swapped
        # relative to the true tags), so this report can be misleading, and
        # km.score() is negative inertia, not an accuracy — confirm intent.
        print(classification_report(y_test, result))
        print("K-means算法预测准确率为:", km.score(X_test, y_test))
        return km
    elif module_name == "SVM":
        # RBF kernel maps the low-dimensional vectors into a higher-
        # dimensional space where the classes may be separable.
        clf = SVC(kernel='rbf', random_state=0, gamma=1, C=1)
        clf.fit(X_train, y_train)
        print(classification_report(y_test, clf.predict(X_test)))
        print("SVM算法预测准确率为:", clf.score(X_test, y_test))
        return clf
    else:
        # Was the unintelligible "not module you change".
        print("unknown model name:", module_name)
        return None

# Classifier evaluation on the training-document vectors.
# NOTE(review): the original comment said "Naive Bayes model test", but the
# call below actually selects "SVM" — confirm which was intended.
# NOTE(review): .docvecs is deprecated in gensim 4 (renamed .dv).
FenLeiTrainList = [zhuModel.docvecs[i] for i in range(len(zhuModel.docvecs))]
TrainClassificationModel(FenLeiTrainList, dataLoadToList(filePath)[1], "SVM", 0.3)
"""
# Parameter tuning — initial accuracies: 0.65, 0.29 (anomalous), 0.83

"""
a={"zhu":1,"huang":"zhujiaqi"}
print(a.get("zhu"))
print(a.get("huang"))
"""