from cmath import pi
from genericpath import exists
from importlib.resources import path
from pydoc import doc
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import pandas as pd
#import os
import jieba
import numpy as np
import pickle
import os
from bertopic import BERTopic
import spacy
import re

def stopwordslist(path="./stopwords.csv"):
    """Load the stopword list from a CSV file.

    Parameters
    ----------
    path : str, optional
        Path to a CSV file with a ``stopword`` column.  Defaults to
        ``./stopwords.csv`` so existing callers are unaffected.

    Returns
    -------
    set of str
        The stopwords, as a set for O(1) membership tests in the
        tokenize-and-filter loop.
    """
    data = pd.read_csv(path)
    return set(data["stopword"])

def chinese_word_cut(mytext):
    """Segment Chinese text into words with jieba (precise mode).

    Parameters
    ----------
    mytext : str
        Raw Chinese text to segment.

    Returns
    -------
    generator of str
        The segmented tokens, as yielded by ``jieba.cut``.
    """
    # Load the custom dictionary only once per process: load_userdict
    # re-parses reserve.txt on every call, and this function is invoked
    # once per DataFrame row, so the original paid that cost N times.
    if not getattr(chinese_word_cut, "_userdict_loaded", False):
        jieba.load_userdict("./reserve.txt")
        chinese_word_cut._userdict_loaded = True
    return jieba.cut(mytext, cut_all=False)


# Load a previously trained topic model if the cache directory exists;
# otherwise preprocess the corpus and train one from scratch.
if os.path.exists("tbert"):
    topic_model = BERTopic.load("tbert")
else:

    df = pd.read_csv("./output.csv")
    df = df.fillna('')

    # Tokenize each document and drop stopwords.  NOTE(review): set()
    # deduplicates tokens, so per-document term frequencies are discarded
    # before topic modeling — confirm that is intended.
    stopwords = stopwordslist()
    df["content_cutted"] = [
        " ".join(set(chinese_word_cut(row["content"])) - stopwords)
        for _, row in df.iterrows()
    ]

    # Half-width and full-width punctuation to strip from the corpus.
    punc = '~`!#$%^&*()_+-=|\';":/.,?><~·！@#￥%……&*（）——+-=“：’；、。，？》《{}'
    docs = df['content_cutted'].tolist()
    docs = [re.sub('[0-9a-zA-Z]', '', s) for s in docs]
    # re.escape keeps every character literal inside the class: the
    # unescaped original let '+-=' form the range '+'..'=', which also
    # matched characters (e.g. ';') that were never listed in punc.
    docs = [re.sub("[%s]" % re.escape(punc), '', s) for s in docs]
    # Drop documents that are empty or a single character after cleaning.
    docs = [s for s in docs if len(s) > 1]

    # Use spaCy's small Chinese pipeline to produce document embeddings.
    zh_model = spacy.load("zh_core_web_sm")
    topic_model = BERTopic(language="chinese (simplified)",
                           nr_topics=15,
                           embedding_model=zh_model,
                           calculate_probabilities=False,
                           verbose=True)
    # fit_transform over ~2000 documents takes about one minute.
    topics, probs = topic_model.fit_transform(docs)
    topic_model.save("tbert")

# Print the per-topic summary table (topic id, size, representative terms).
print(topic_model.get_topic_info())
# NOTE(review): visualize_barchart's first parameter expects a list of topic
# ids, but get_topics() returns a dict — presumably only its keys are used;
# verify against the installed BERTopic version.
topic_model.visualize_barchart(topic_model.get_topics(), top_n_topics=6, n_words=15).show()
# print(topic_model.get_topic(0))
# print(topic_model.get_topic(1))
# print(topic_model.get_topic(2))
# print(topic_model.get_topic(3))
# topic_idx = [x_i.argmax() for x_i in X_new]
# from collections import Counter
# cnt = Counter(topic_idx)
# print(cnt)
# df["topic_idx"] = topic_idx
# df.to_csv("out_lda.csv")

# import pyLDAvis
# import pyLDAvis.sklearn
# data = pyLDAvis.sklearn.prepare(lda, tf, tf_vectorizer)
# pyLDAvis.show(data)
