import pickle
import re
from datetime import datetime

import jieba
import matplotlib.pyplot as plt
import pyLDAvis
import pyLDAvis.sklearn
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

class Singleton:
    """A class of which at most one instance exists.

    BUG FIX: the original declared ``_instance`` but never used it, so every
    call created a fresh object. ``__new__`` now caches and reuses the single
    instance. Note that ``__init__`` still runs on every call, so the cached
    instance's ``name``/``volume`` are overwritten by the latest arguments.
    """

    _instance = None  # the one shared instance (created lazily)

    def __new__(cls, *args, **kwargs):
        # Create the instance only on the first call; reuse it afterwards.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, name, volume):
        self.name = name
        self.volume = volume

	

class ImageProcessorGrayScale(ImageProcessor, object):  # grayscale-conversion step in the image-processing hierarchy
    # Grayscale conversion: run the base processing first, then convert.
    def process(self):
        """Run the base-class processing, convert the image to 8-bit
        grayscale ('L' mode), display it, and return the converted image.

        NOTE(review): this reads ``ImageProcessor.im`` as a *class*
        attribute rather than ``self.im`` — looks unintentional; confirm
        against the base class, which is not visible in this chunk.
        """
        super().process()
        image_greyscale = ImageProcessor.im.convert('L')  # PIL: 'L' = single-channel grayscale
        image_greyscale.show()
        return image_greyscale

def sort_doc(file_path):
    """Group the records of a weibo.txt-style file by day.

    Each line is tab-separated: coordinates, text, timestamp
    (e.g. ``Mon Oct 25 12:00:00 +0800 2021``).

    Args:
        file_path: path to the tab-separated text file.

    Returns:
        dict mapping a ``"<month><day>"`` key (e.g. ``"1025"``, ``"1130"``)
        to a list of ``(coords, text)`` tuples for that day.

    NOTE(review): the un-padded key is ambiguous for single-digit values
    ("111" could be Jan 11 or Nov 1); ``parsed.strftime("%m%d")`` would fix
    that, but would change existing keys that callers may rely on.
    """
    data_by_day = {}
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            fields = line.strip().split('\t')
            coords, text, time_str = fields[0], fields[1], fields[2]
            # Parse the timestamp once (the original parsed it twice per line).
            parsed = datetime.strptime(time_str, "%a %b %d %H:%M:%S %z %Y")
            day_key = str(parsed.month) + str(parsed.day)
            # setdefault replaces the explicit if/else insert-or-append dance.
            data_by_day.setdefault(day_key, []).append((coords, text))
    return data_by_day


def my_stopwords(path='D:\\Download\\python-week2\\my_stopwords.txt'):
    """Load a stopword list, one word per line.

    Args:
        path: stopword file path. Defaults to the original hard-coded
            location so existing ``my_stopwords()`` calls keep working.

    Returns:
        list of stripped stopword strings.
    """
    with open(path, 'r', encoding='utf-8') as fp:
        # Iterating the file directly avoids materializing readlines() first.
        return [line.strip() for line in fp]


def word_process(stopwords, sentence):
    """Preprocess one document: jieba-tokenize, drop stopwords, and join
    the surviving tokens into a single space-separated string.

    Args:
        stopwords: collection of tokens to discard (stopwords/punctuation).
        sentence: raw document text.

    Returns:
        space-joined string of the kept tokens.
    """
    kept_tokens = (token for token in jieba.cut(sentence)
                   if token not in stopwords)
    return ' '.join(kept_tokens)



def word_count(docs):
    """Vectorize *docs*, pick an LDA topic count via an elbow plot, fit a
    final model, print topics and per-document distributions, and persist
    and visualize the result.

    Args:
        docs: iterable of preprocessed documents (space-joined token strings,
            as produced by ``word_process``).

    Side effects:
        Shows a matplotlib elbow plot, prints topics/distributions, writes
        ``.lda_model.pkl``, and opens a pyLDAvis view.
    """
    vectorizer = CountVectorizer()
    x = vectorizer.fit_transform(docs)

    # Elbow method: plot perplexity against candidate topic counts.
    perplexity_scores = []
    k_range = range(1, 6)  # candidate topic counts 1..5
    for k in k_range:
        lda = LatentDirichletAllocation(n_components=k)
        lda.fit(x)
        perplexity_scores.append(lda.perplexity(x))
    plt.plot(k_range, perplexity_scores, '-o')
    plt.xlabel('Number of topics')
    plt.ylabel('Perplexity')
    plt.show()

    # Final model; k=2 was chosen from the elbow plot above.
    k = 2
    lda = LatentDirichletAllocation(n_components=k)
    lda.fit(x)
    # Print the top 5 words of each topic.
    feature_names = vectorizer.get_feature_names_out()
    for i, topic in enumerate(lda.components_):
        print(f"Topic {i}:")
        top_words = [feature_names[j] for j in topic.argsort()[:-6:-1]]
        print(top_words)
    # Print each document's topic-probability distribution.
    for i in range(len(docs)):
        print(f"Document{i}:")
        print(lda.transform(x[i]))
    # BUG FIX: the original passed an unclosed open(...) to pickle.dump,
    # leaking the file handle; a with-block guarantees it is closed.
    with open('.lda_model.pkl', 'wb') as model_file:
        pickle.dump((lda, x, vectorizer), model_file)
    data = pyLDAvis.sklearn.prepare(lda, x, vectorizer)
    pyLDAvis.display(data)  # render in a notebook output cell

    pyLDAvis.show(data)  # open an interactive view in the browser

def add_emotion_dict_path(*dict_path):
    """Collect emotion-dictionary paths.

    Accepts any number of path arguments and returns them, in order,
    as a tuple.
    """
    return tuple(dict_path)

def creat_emotion_lists(dict_path):  # (sic) name kept for caller compatibility
    """Load each emotion dictionary file into its own word list.

    Args:
        dict_path: sequence of file paths, one per emotion category.

    Returns:
        list of lists: ``result[i]`` holds the words of ``dict_path[i]``,
        one entry per line, with the trailing newline removed.
    """
    emotion_type_list = []
    for path in dict_path:
        with open(path, 'r', encoding='utf-8') as dic_file:
            # BUG FIX: the original used line[:-1], which chops the last
            # character of a final line that has no trailing newline;
            # rstrip('\n') removes only the newline.
            emotion_type_list.append([line.rstrip('\n') for line in dic_file])
    return emotion_type_list

# Data cleaning
def clean(str):  # NOTE(review): parameter shadows the builtin `str`; name kept for interface compatibility
    """Clean one weibo comment string.

    Removes repost/reply markers and @-mentions, ASCII and Chinese
    punctuation, bracketed emoticon tags, URLs, the boilerplate phrase
    "转发微博", and collapses runs of whitespace.

    BUG FIX: this function uses the ``re`` module, which the file never
    imported (now added to the top-level imports).

    NOTE(review): also relies on a module-level ``punctuation_str`` that is
    not defined in this chunk — presumably a string of Chinese punctuation
    characters; confirm where it is defined.
    """
    # Strip repost/reply markers and @-mentions, e.g. "回复//@user:".
    str = re.sub(r"(回复)?(//)?\s*@\S*?\s*(:| |$)", " ", str)
    r = "[A-Za-z0-9_.!+-=——,$%^，。？、~@#￥%……&*《》<>「」{}【】()/\\\[\]'\"]"
    # Remove Chinese punctuation characters one by one.
    for i in punctuation_str:
        str = str.replace(i, "")
    str = re.sub(r, "", str)           # remove remaining punctuation/ASCII noise
    str = re.sub(r"\[\S+\]", "", str)  # remove [emoticon] tags
    URL_REGEX = re.compile(
        r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
        re.IGNORECASE)
    str = re.sub(URL_REGEX, "", str)   # remove URLs
    str = str.replace("转发微博", "")   # drop meaningless boilerplate
    str = re.sub(r"\s+", " ", str)     # collapse excess whitespace
    return str.strip()

if __name__ == '__main__':
    # BUG FIX: the original repeated this guard (and its body) twice,
    # which would have run the whole pipeline a second time.
    docs = word_joint()  # NOTE(review): word_joint is not defined in this chunk — confirm it exists elsewhere
    word_count(docs)


