import jieba
from sklearn.manifold import TSNE
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
# Font used to render Chinese text (plot titles / annotations) in matplotlib.
# NOTE(review): the variable is named YaHei but the file HGKT2_CNKI.TTF is a
# CNKI Kaiti face, not Microsoft YaHei — confirm the intended font file.
YaHei = FontProperties(fname='C:\\Windows\\Fonts\\HGKT2_CNKI.TTF')

## Newly added section: method-resolution / super() demo
import inspect
class BaseClass:
    """Root of the demo hierarchy; tracks how many times call_me() runs."""

    num_base_calls = 0  # class-wide counter shared by every instance

    def call_me(self):
        """Print a trace line and bump the class-level call counter."""
        print("calling method on Base Class")
        BaseClass.num_base_calls = BaseClass.num_base_calls + 1

class LeftSubclass(BaseClass):
    """Subclass that chains to the base via super() and keeps its own counter."""

    num_left_calls = 0  # counts calls routed through this subclass

    def call_me(self):
        """Delegate up the MRO first, then record the call here as well."""
        super().call_me()
        print("calling method on Left Subclass")
        LeftSubclass.num_left_calls = LeftSubclass.num_left_calls + 1
### End of newly added section

class TextAnalyzer:
    """Train a Word2Vec model on a micro-blog corpus and visualize word vectors.

    Intended pipeline (see ``__main__``):
    word_process -> model_create -> model_judge -> model_save -> model_load
    -> model_expand -> model_show.

    Fixes vs. the previous revision: the ``model_judge`` / ``model_save`` /
    ``model_load`` methods called from ``__main__`` were missing (AttributeError
    at runtime); they are restored here and populate the attributes that
    ``model_show`` (``most_similar``, ``least_similar``, ``s``) and
    ``model_expand`` (the loaded pre-trained model) read.
    """

    def __init__(self, analyse_loc, model_loc, vector_size, win_size, min_count,
                 stopwords_loc='D:\\Download\\python-week2\\my_stopwords.txt'):
        """Store configuration only; no I/O happens here.

        Args:
            analyse_loc: path of the corpus to analyse (weibo.txt).
            model_loc: path of the pre-trained model used by model_load().
            vector_size: embedding dimensionality.
            win_size: context window size for Word2Vec.
            min_count: minimum word frequency kept by Word2Vec.
            stopwords_loc: stop-word list, one word per line (new optional
                parameter; default is the previously hard-coded path).
        """
        self.analyse_loc = analyse_loc
        self.model_loc = model_loc
        self.vector_size = vector_size
        self.win_size = win_size
        self.count = min_count
        self.stopwords_loc = stopwords_loc

    def word_process(self):
        """Tokenize the corpus with jieba and drop stop words.

        Each corpus line is expected to be "<label>\\t<text>"; only the text
        field is segmented. Malformed lines without a tab are skipped instead
        of raising IndexError. Result: self.sentences, a list of token lists.
        """
        with open(self.stopwords_loc, 'r', encoding='utf-8') as fp:
            # set: O(1) membership test inside the per-token filter below
            stopwords = {line.strip() for line in fp}
        self.sentences = []
        with open(self.analyse_loc, 'r', encoding='utf-8') as f:
            for line in f:
                parts = line.strip().split('\t')
                if len(parts) < 2:
                    continue  # no text field on this line; skip it
                sen = [w for w in jieba.cut(parts[1]) if w not in stopwords]
                self.sentences.append(sen)
        print(self.sentences[:10])  # sanity check: first 10 tokenized sentences

    def model_create(self):
        """Train a Word2Vec model on self.sentences (requires word_process())."""
        self.model = Word2Vec(self.sentences, vector_size=self.vector_size,
                              window=self.win_size, min_count=self.count)

    def model_judge(self, word='开心'):
        """Collect the 10 most and 10 least similar words to *word*.

        Stores (word, cosine similarity) pairs in self.most_similar and
        self.least_similar and the plot title in self.s, as read by
        model_show(). Raises KeyError if *word* is not in the vocabulary.
        """
        self.s = word
        self.most_similar = self.model.wv.most_similar(word, topn=10)
        # negative=[word] ranks the vocabulary by dissimilarity to the word
        self.least_similar = self.model.wv.most_similar(negative=[word], topn=10)

    def model_save(self, save_loc='weibo_word2vec.model'):
        """Persist the model trained by model_create() to *save_loc*."""
        self.model.save(save_loc)

    def model_load(self):
        """Load the pre-trained model from self.model_loc for model_expand()."""
        self.model_loaded = Word2Vec.load(self.model_loc)

    def model_expand(self):
        """Expand the five emotion lexicons with nearest neighbours.

        For every lexicon word known to the loaded pre-trained model, its 5
        most similar words are appended; the enlarged lexicon is written to
        new_<emotion>.txt next to the original file.
        """
        list_order = ['anger', 'disgust', 'fear', 'joy', 'sadness']
        for emotion in list_order:
            src = "D:/Download/emotion_lexicon/" + emotion + ".txt"
            with open(src, 'r', encoding='utf-8') as file_store:
                one_sentence = [line.strip() for line in file_store]
            extend_emo = []
            for item in one_sentence:
                if item in self.model_loaded.wv:  # skip out-of-vocabulary words
                    related_emo = self.model_loaded.wv.most_similar(item, topn=5)
                    extend_emo.extend(w for w, _ in related_emo)
            one_sentence.extend(extend_emo)
            dst = "D:/Download/emotion_lexicon/new_" + emotion + '.txt'
            with open(dst, 'w', encoding='utf-8') as new_file:
                new_file.write('\n'.join(one_sentence))

    def model_show(self):
        """Project the judged word vectors to 2-D with t-SNE and plot them.

        Requires model_judge() to have populated self.most_similar,
        self.least_similar and self.s.
        """
        pairs = self.most_similar + self.least_similar
        vectors = np.array([self.model.wv[word] for word, _ in pairs])
        print(vectors.shape)
        words = [word for word, _ in pairs]
        tsne = TSNE(n_components=2, perplexity=10)
        vectors_tsne = tsne.fit_transform(vectors)
        fig, ax = plt.subplots()
        ax.set_title(self.s, fontproperties=YaHei)
        # first 10 points are the most-similar words (blue), rest least similar (red)
        ax.scatter(vectors_tsne[:10, 0], vectors_tsne[:10, 1], color='blue')
        ax.scatter(vectors_tsne[10:, 0], vectors_tsne[10:, 1], color='red')
        for i, word in enumerate(words):
            ax.annotate(word, (vectors_tsne[i, 0], vectors_tsne[i, 1]),
                        fontproperties=YaHei)
        plt.show()


if __name__ == "__main__":
    # Full pipeline: tokenize the corpus, train a Word2Vec model, pick
    # similar/dissimilar words, persist the trained model, load the large
    # pre-trained embedding, expand the emotion lexicons with it, and plot
    # the t-SNE projection.
    # NOTE(review): model_judge / model_save / model_load must be defined on
    # TextAnalyzer for this to run — verify the class provides them.
    text = TextAnalyzer('D:/Download/python-week2/weibo.txt', 'D:\\Download\\weibo_59g_embedding_200.model', 300, 5, 1)
    text.word_process()
    text.model_create()
    text.model_judge()
    text.model_save()
    text.model_load()
    text.model_expand()
    text.model_show()




