'''
Lexicon-based IMDB sentiment classifier.

Metrics recorded from a previous run:
Training time: 1.65 seconds
Final Test Accuracy: 0.7989
'''

import json
import os
import string
import time
from collections import Counter

from datasets import load_dataset
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.metrics import accuracy_score

# Toggle between retraining and reusing a previously saved model.
skip_training = False  # True: load the saved lexicon, False: train from scratch

# Location where the trained lexicon is persisted.
trained_model_path = "./results/optimized_sentiment_lexicon.json"

# Offline load of the IMDB dataset from local Parquet files.
data_files = {
    "train": "./imdb/plain_text/train-00000-of-00001.parquet",
    "test": "./imdb/plain_text/test-00000-of-00001.parquet",
}
dataset = load_dataset("parquet", data_files=data_files)

# Split out texts and labels for both partitions.
train_texts = [example["text"] for example in dataset["train"]]
train_labels = [example["label"] for example in dataset["train"]]
test_texts = [example["text"] for example in dataset["test"]]
test_labels = [example["label"] for example in dataset["test"]]

def compute_sentiment_lexicon(texts, labels, min_frequency=2, weight_threshold=0.3):
    """Build an optimized sentiment lexicon from labeled texts.

    Pipeline:
      1. Lowercase, split on whitespace, drop stop words and any token
         containing non-alphabetic characters.
      2. Discard words whose combined frequency is below ``min_frequency``.
      3. Score each remaining word by its class imbalance,
         (pos - neg) / total, and keep words whose score magnitude
         exceeds ``weight_threshold``.

    Returns a ``(positive_words, negative_words)`` pair of sets.
    """
    pos_counts = Counter()
    neg_counts = Counter()

    # Tally token frequencies separately for positive and negative samples.
    for text, label in zip(texts, labels):
        tokens = (
            token
            for token in text.lower().split()
            if token.isalpha() and token not in ENGLISH_STOP_WORDS
        )
        # Any label other than 1 is treated as negative, as in the dataset.
        (pos_counts if label == 1 else neg_counts).update(tokens)

    positive_words = set()
    negative_words = set()
    for word in pos_counts.keys() | neg_counts.keys():
        pos = pos_counts[word]
        neg = neg_counts[word]
        total = pos + neg
        if total < min_frequency:
            # Too rare to score reliably.
            continue
        score = (pos - neg) / total  # in [-1, 1]; sign encodes polarity
        if score > weight_threshold:
            positive_words.add(word)
        elif score < -weight_threshold:
            negative_words.add(word)

    return positive_words, negative_words

def analyze_sentiment(text, positive_words, negative_words):
    """Classify *text* against the sentiment lexicon.

    Counts how many tokens appear in each word set and returns 1 (positive)
    when positive hits strictly outnumber negative hits, otherwise 0
    (negative). Ties — including texts with no lexicon hits at all —
    default to the negative class.

    Bug fix: the lexicon built by compute_sentiment_lexicon contains only
    purely alphabetic words, so raw whitespace tokens carrying punctuation
    (e.g. "amazing!" or "loved.") could never match. Tokens are now
    stripped of surrounding punctuation before lookup.
    """
    words = [word.strip(string.punctuation) for word in text.lower().split()]
    positive_score = sum(1 for word in words if word in positive_words)
    negative_score = sum(1 for word in words if word in negative_words)
    return 1 if positive_score > negative_score else 0

if not skip_training:
    print("Training model...")

    # Time the lexicon construction and save.
    start = time.time()

    # Build the optimized sentiment lexicon from the training split.
    positive_words, negative_words = compute_sentiment_lexicon(train_texts, train_labels)

    # Persist the lexicon as JSON so later runs can skip training.
    os.makedirs(os.path.dirname(trained_model_path), exist_ok=True)
    with open(trained_model_path, "w") as f:
        json.dump(
            {
                "positive_words": list(positive_words),
                "negative_words": list(negative_words),
            },
            f,
        )

    elapsed = time.time() - start
    print(f"Model saved to {trained_model_path}")
    print(f"Training time: {elapsed:.2f} seconds")

else:
    print("Loading pre-trained model...")
    # Restore the previously saved lexicon.
    with open(trained_model_path, "r") as f:
        lexicon = json.load(f)
        positive_words = set(lexicon["positive_words"])
        negative_words = set(lexicon["negative_words"])

# Evaluate classification accuracy on the held-out test split.
print("Evaluating model...")
test_predictions = [
    analyze_sentiment(text, positive_words, negative_words) for text in test_texts
]
accuracy = accuracy_score(test_labels, test_predictions)
print(f"Final Test Accuracy: {accuracy:.4f}")

# Classify a single example sentence as a sanity check.
text = "I absolutely loved this movie! It was amazing and so well acted."
predicted_class = analyze_sentiment(text, positive_words, negative_words)
label_name = "Positive" if predicted_class == 1 else "Negative"
print("Predicted Sentiment:", label_name)
