# 导入所需的库
import pandas as pd
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

# English stopword set from NLTK, used by TextClassification.preprocess_text
# to filter common words out of the review text.
stop_words = set(stopwords.words("english"))


# Text-classification helper: loads drug-review data and runs LDA topic modeling.
class TextClassification:
    """Load a drug-review TSV file, clean its review text, and run
    Latent Dirichlet Allocation (LDA) topic modeling over it.

    Expects the input file under ``../InputData/`` with at least the
    columns ``benefitsReview``, ``sideEffectsReview`` and ``commentsReview``.
    """

    # Compiled once at class-definition time instead of on every
    # preprocess_text call: matches anything that is not an ASCII letter
    # or whitespace (digits, punctuation, special characters).
    _non_alpha_re = re.compile(r"[^a-zA-Z\s]")

    def __init__(self, filename):
        """Load ``<filename>.tsv``, drop rows with missing review fields,
        and build cleaned copies of the three review columns.

        Args:
            filename: Base name of the TSV file (without extension) under
                ``../InputData/``.
        """
        file_path = "../InputData/"
        # Fields in the data file are tab-separated.
        self.data = pd.read_csv(file_path + filename + ".tsv", delimiter="\t")
        # Drop any row with a missing value in one of the review columns.
        self.data.dropna(
            subset=["benefitsReview", "sideEffectsReview", "commentsReview"],
            inplace=True,
        )
        # Raw (uncleaned) concatenation of the three review fields per row;
        # written out verbatim by output_data.
        self.texts = (
            self.data["benefitsReview"]
            + " "
            + self.data["sideEffectsReview"]
            + " "
            + self.data["commentsReview"]
        )
        # Add a "<column>_cleaned" version of each review column
        # (lower-cased, letters-only, stopwords removed).
        for column in ("benefitsReview", "sideEffectsReview", "commentsReview"):
            self.data[column + "_cleaned"] = self.data[column].apply(
                self.preprocess_text
            )

    def output_data(self, name1):
        """Write the combined raw review texts to ``../OutputData/<name1>.txt``,
        one dataset row per line."""
        filename1 = "../OutputData/" + name1 + ".txt"
        with open(filename1, "w", encoding="utf-8") as file:
            # One buffered call instead of a write() per row.
            file.writelines(item + "\n" for item in self.texts)

    def preprocess_text(self, text):
        """Clean one text value: lower-case it, strip non-letter characters,
        and remove English stopwords.

        Args:
            text: Raw text value; may be NaN.

        Returns:
            The cleaned string, or ``""`` when ``text`` is NaN.
        """
        if pd.isna(text):
            return ""
        text = text.lower()
        # Remove digits, punctuation and other special characters.
        text = self._non_alpha_re.sub("", text)
        # Drop stopwords (stop_words is the module-level NLTK set).
        return " ".join(word for word in text.split() if word not in stop_words)

    def print_top_words(self, model, feature_names, n_top_words):
        """Print the ``n_top_words`` highest-weight terms for each topic
        of a fitted LDA ``model``."""
        for topic_idx, topic in enumerate(model.components_):
            print(f"Topic {topic_idx + 1}:")
            # argsort is ascending; the reversed slice picks the
            # n_top_words largest weights in descending order.
            top_indices = topic.argsort()[: -n_top_words - 1 : -1]
            print(" ".join(feature_names[i] for i in top_indices))

    def train_model(self, n_topics=10, n_top_words=5):
        """Fit an LDA topic model on the cleaned, combined review text and
        print each topic's top keywords.

        Args:
            n_topics: Number of LDA topics (default 10, as before).
            n_top_words: Keywords printed per topic (default 5, as before).
        """
        # Merge the cleaned review columns into one text series per row.
        self.combined_reviews = (
            self.data["benefitsReview_cleaned"]
            + " "
            + self.data["sideEffectsReview_cleaned"]
            + " "
            + self.data["commentsReview_cleaned"]
        )
        # Term-frequency matrix over the combined reviews.
        vectorizer = CountVectorizer()
        x = vectorizer.fit_transform(self.combined_reviews)

        # random_state fixed so results are reproducible.
        lda = LatentDirichletAllocation(n_components=n_topics, random_state=42)
        lda.fit(x)

        # Fetch the vocabulary array once; the original re-built it on
        # every loop iteration below.
        feature_names = vectorizer.get_feature_names_out()
        self.print_top_words(lda, feature_names, n_top_words)

        # Map each topic to its top keywords, intended as a starting point
        # for relating topics back to the urlDrugName field.
        topic_to_drug_mapping = {}
        for topic_idx in range(lda.n_components):
            keywords = [
                feature_names[i]
                for i in lda.components_[topic_idx].argsort()[: -n_top_words - 1 : -1]
            ]
            topic_to_drug_mapping[f"Topic {topic_idx + 1}"] = keywords

        # Print every topic with its associated keywords.
        for topic, keywords in topic_to_drug_mapping.items():
            print(f"{topic}: {keywords}")


# Script entry point: build the classifier on the raw test split,
# fit the topic model, then dump the merged review texts.
if __name__ == "__main__":
    classifier = TextClassification("drugLibTest_raw")
    classifier.train_model()
    classifier.output_data("example2")
