# 导入需要的第三方库
import numpy as np
from nltk.corpus import brown
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer

# Global matplotlib font configuration so CJK axis labels render correctly.
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Microsoft YaHei"]  # Microsoft YaHei (has CJK glyphs)
plt.rcParams["axes.unicode_minus"] = False  # render minus signs with non-ASCII fonts

# NOTE: the previous `brown.categories()` call here was dead code — its return
# value was discarded and it only forced an NLTK corpus load at import time.
# English sentiment analysis using the nltk library.


class SentimentAnalysis:
    """English sentiment analysis over a tab-separated "text<TAB>label" file.

    Two independent approaches are provided:
    - ``sentiment_analysis1``: rule-based scoring with NLTK's VADER analyzer,
      compared against the gold labels and visualized as a bar chart.
    - ``sentiment_analysis2``: a MultinomialNB classifier trained on
      bag-of-words counts, evaluated with accuracy and a classification report.
    """

    def __init__(self, filename):
        """Load "<text>\\t<label>" lines from ../InputData/<filename>.txt.

        Populates ``self.texts`` (raw sentences) and ``self.labels``
        (int labels, aligned with texts). Blank or malformed lines are
        skipped silently.
        """
        self.texts = []    # one raw sentence per valid input line
        self.labels = []   # gold label (int) aligned with self.texts
        self.results = []  # predicted labels, filled by sentiment_analysis1
        file_path = "../InputData/"
        # Encoding given explicitly so reading matches output_data's utf-8
        # write instead of depending on the platform default encoding.
        with open(file_path + filename + ".txt", "r", encoding="utf-8") as file:
            for line in file:
                # Strip the trailing newline/whitespace before splitting.
                line = line.strip()
                if not line:
                    continue  # skip blank lines
                parts = line.split("\t")
                if len(parts) == 2:  # require exactly text + label
                    text, label = parts
                    self.texts.append(text)
                    self.labels.append(int(label))

    # Write out the label-free text data set.
    def output_data(self, name1):
        """Write the loaded texts, one per line, to ../OutputData/<name1>.txt."""
        filename1 = "../OutputData/" + name1 + ".txt"
        with open(filename1, "w", encoding="utf-8") as file:
            # writelines batches the many small writes into a single call.
            file.writelines(item + "\n" for item in self.texts)

    # Annotate bar heights on a bar chart.
    def add_labels(self, bars):
        """Write each bar's height centered just above the bar.

        NOTE(review): relies on ``self.ax`` having been set by
        ``sentiment_analysis1`` before this is called.
        """
        for bar in bars:
            height = bar.get_height()
            self.ax.annotate(
                "{}".format(height),
                xy=(bar.get_x() + bar.get_width() / 2, height),
                xytext=(0, 3),  # 3 points vertical offset above the bar
                textcoords="offset points",
                ha="center",
                va="bottom",
            )

    # Text preprocessing.
    def preprocess(self, text):
        """Lowercase and tokenize *text*, dropping stop words and any token
        that is not purely alphabetic; return the remaining tokens joined
        by single spaces.
        """
        words = nltk.word_tokenize(text.lower())
        # Build the stop-word set once and cache it on the instance:
        # stopwords.words() re-reads the corpus file, which is wasteful
        # when preprocess runs once per sentence.
        if not hasattr(self, "_stop_words"):
            self._stop_words = set(stopwords.words("english"))
        filtered_words = [
            word for word in words if word not in self._stop_words and word.isalpha()
        ]
        return " ".join(filtered_words)

    def sentiment_analysis1(self):
        """Score every sentence with VADER, print the agreement rate with the
        gold labels, and plot the 0/1 distributions of gold labels vs.
        predictions side by side.

        A sentence is predicted 0 (negative) when its "neg" score is at
        least its "pos" score, else 1 (positive).
        """
        # VADER returns compound/neu/neg/pos scores per sentence.
        sid = SentimentIntensityAnalyzer()
        for sentence in self.texts:
            # Score each sentence once (previously scored twice per sentence).
            scores = sid.polarity_scores(sentence)
            self.results.append(0 if scores["neg"] >= scores["pos"] else 1)
        # Count positions where prediction matches the gold label.
        matches = sum(
            1 for gold, pred in zip(self.labels, self.results) if gold == pred
        )
        # Guard against ZeroDivisionError when the input file was empty.
        similarity_rate = round(matches / len(self.labels), 2) if self.labels else 0.0
        print(similarity_rate)

        # Visualization: per-class counts for gold labels and predictions.
        count1 = [self.labels.count(0), self.labels.count(1)]
        count2 = [self.results.count(0), self.results.count(1)]

        # Bar-group x positions for the two classes.
        labels = ["0", "1"]
        x = np.arange(len(labels))

        # Width of each bar.
        width = 0.35

        # Grouped bar chart: gold labels vs. predicted results.
        fig, self.ax = plt.subplots()
        bars1 = self.ax.bar(x - width / 2, count1, width, label="labels", color="blue")
        bars2 = self.ax.bar(
            x + width / 2, count2, width, label="results", color="orange"
        )

        # Axis text (kept in Chinese to match the configured CJK font).
        self.ax.set_ylabel("数量")
        self.ax.set_title("0 和 1 的数量比较")
        self.ax.set_xticks(x)
        self.ax.set_xticklabels(labels)
        self.ax.legend()
        self.add_labels(bars1)
        self.add_labels(bars2)

        # Render the figure.
        plt.tight_layout()
        plt.show()

    def sentiment_analysis2(self):
        """Train and evaluate a MultinomialNB bag-of-words classifier.

        Uses an 80/20 train/test split (random_state=42) and prints the
        accuracy plus the full sklearn classification report.
        """
        # Preprocess every sentence (comprehension instead of [None]*n + index fill).
        x = [self.preprocess(text) for text in self.texts]
        y = self.labels
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=0.2, random_state=42
        )  # split into training and test sets
        # Feature extraction: CountVectorizer turns text into token-count vectors.
        vectorizer = CountVectorizer()
        x_train_counts = vectorizer.fit_transform(x_train)  # fit on train only
        x_test_counts = vectorizer.transform(x_test)  # reuse the train vocabulary
        # Fit the Naive Bayes model on the training counts.
        model = MultinomialNB()
        model.fit(x_train_counts, y_train)
        # Evaluate on the held-out test set.
        y_pred = model.predict(x_test_counts)

        # Print accuracy and the per-class precision/recall/F1 report.
        report = classification_report(y_test, y_pred)
        print("Accuracy:", accuracy_score(y_test, y_pred))
        print("\nClassification Report:\n", report)


# Script entry point: run the VADER analysis on the Yelp data set and
# dump the label-free texts for later use.
if __name__ == "__main__":
    analyzer = SentimentAnalysis("yelp_labelled")
    analyzer.sentiment_analysis1()
    analyzer.output_data("example1")