import jieba
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.utils import to_categorical
import re
import matplotlib.pyplot as plt
import seaborn as sns
import joblib

# 1. Load the dataset.
# Expected file format (after a header row): "<comment>\t<label>" per line,
# where label is 0 (negative) or 1 (positive).
data = []
labels = []
with open("jd_comments.txt", 'r', encoding='utf-8') as f:
    next(f)  # skip the header row
    for line in f:
        if '\t' in line:
            # rsplit with maxsplit=1 takes the LAST tab as the separator,
            # so comments that themselves contain tab characters still
            # parse; a plain split('\t') would raise ValueError
            # ("too many values to unpack") on such lines.
            comment, label = line.strip().rsplit('\t', 1)
            data.append(comment)
            labels.append(int(label))

print(f"数据加载完成，共 {len(data)} 条评论")
print(f"正面评论: {sum(labels)} 条，负面评论: {len(labels) - sum(labels)} 条")

# 2. Chinese stop-word lexicon.
# Grouped by rough word class purely for readability; the groups are
# concatenated in order, and any duplicates between groups are harmless
# because lookups go through the de-duplicated set built at the end.
_particles_and_common_words = [
    '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要',
    '去', '你', '会', '着', '没有', '看', '好', '自己', '这', '但', '什么', '我们', '这个', '那个', '和', '与', '或',
]
_connectives_and_modals = [
    '且', '如果', '因为', '所以', '但是', '然后', '可以', '应该', '能够', '需要', '必须', '一些', '一点', '这种',
]
_demonstratives_and_pronouns = [
    '那种', '这样', '那样', '怎么', '为什么', '哪里', '什么时候', '谁', '什么', '怎样', '多少', '几个', '各种', '各个',
    '每个', '这种', '那种', '这些', '那些', '它们', '他们', '她们', '我们', '你们', '咱们', '大家', '别人', '有人',
]
_indefinite_references = [
    '每个人', '任何人', '没有人', '某人', '某些人', '某些东西', '某些事情', '某些地方', '某些时候', '某些原因',
    '某些方式',
]
_interjections = ['啊', '哦', '嗯', '唉', '嘛', '吧', '呢', '呀', '啦']

chinese_stopwords = (
    _particles_and_common_words
    + _connectives_and_modals
    + _demonstratives_and_pronouns
    + _indefinite_references
    + _interjections
)
# Set gives O(1) membership tests during token filtering.
stopwords = set(chinese_stopwords)

# 3. Text preprocessing and tokenisation.
print("开始文本预处理...")
processed_comments = []
for comment in data:
    # Strip punctuation and special characters (keep word chars / whitespace).
    text = re.sub(r'[^\w\s]', '', comment)
    # Tokenise with jieba, then drop stop words and single-character tokens
    # (single characters in Chinese are rarely informative on their own).
    words = [w for w in jieba.lcut(text) if w not in stopwords and len(w) > 1]
    processed_comments.append(' '.join(words))

# Show up to five sample rows. Slicing + zip stops at the shorter list,
# so this no longer raises IndexError when the dataset holds fewer than
# five comments (the original hard-coded range(5) did).
print("预处理后的前5条评论：")
for raw, cleaned in zip(data[:5], processed_comments[:5]):
    print(f"原始: {raw}")
    print(f"处理后: {cleaned}")
    print("---")

# 4. TF-IDF vectorisation of the preprocessed comments.
print("开始TF-IDF向量化...")
tfidf_vectorizer = TfidfVectorizer(
    max_features=500,    # cap the vocabulary at the 500 strongest terms
    min_df=2,            # drop terms seen in fewer than 2 documents
    max_df=0.8,          # drop terms seen in more than 80% of documents
    ngram_range=(1, 2),  # unigrams and bigrams
)
X_tfidf = tfidf_vectorizer.fit_transform(processed_comments).toarray()

# 5. Shape the matrix for the LSTM: append a trailing axis so each TF-IDF
# feature becomes one "time step" carrying a single scalar,
# i.e. (samples, features, 1) — equivalent to the original reshape.
# NOTE(review): treating TF-IDF dimensions as a temporal sequence is
# unusual for an LSTM — confirm this is intentional.
X_sequences = np.expand_dims(X_tfidf, axis=-1)

# One-hot encode the binary labels for categorical_crossentropy.
y = np.array(labels)
y_categorical = to_categorical(y, 2)

# 6. Train/test split (80/20). Stratifying on the raw integer labels keeps
# the positive/negative class proportions identical in both partitions;
# the fixed seed makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X_sequences,
    y_categorical,
    test_size=0.2,
    random_state=42,
    stratify=y,
)

print(f"训练集大小: {X_train.shape}")
print(f"测试集大小: {X_test.shape}")

# 7. Build the LSTM classifier.
print("构建LSTM模型...")
model = Sequential([
    # Stacked LSTMs: the first layer returns the full sequence so the
    # second LSTM can consume it; both use input + recurrent dropout.
    LSTM(128, return_sequences=True,
         input_shape=(X_train.shape[1], X_train.shape[2]),
         dropout=0.2, recurrent_dropout=0.2),
    LSTM(64, dropout=0.2, recurrent_dropout=0.2),
    # Dense classification head, regularised with dropout.
    Dense(32, activation='relu'),
    Dropout(0.3),
    Dense(16, activation='relu'),
    Dropout(0.2),
    # Two-way softmax over (negative, positive).
    Dense(2, activation='softmax'),
])

# Categorical cross-entropy matches the one-hot targets built above.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print("模型结构摘要:")
model.summary()

# 8. Train the model.
print("开始训练模型...")
training_config = dict(
    epochs=10,
    batch_size=8,
    validation_split=0.2,  # hold out 20% of the training data for validation
    verbose=1,
)
history = model.fit(X_train, y_train, **training_config)

# 9. Evaluate on the held-out test set.
print("开始模型评估...")
test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f"测试集准确率: {test_accuracy:.4f}")
print(f"测试集损失: {test_loss:.4f}")

# Collapse softmax outputs / one-hot targets back to class indices
# so the sklearn report functions can compare them.
y_pred = model.predict(X_test)
y_pred_classes = y_pred.argmax(axis=1)
y_true_classes = y_test.argmax(axis=1)

print("\n分类报告:")
print(classification_report(y_true_classes, y_pred_classes, target_names=['负面', '正面']))

# Confusion matrix: rows = true class, columns = predicted class.
cm = confusion_matrix(y_true_classes, y_pred_classes)
print("混淆矩阵:")
print(cm)

# 10. Sentiment analysis on unseen comments.
print("\n新评论情感分析测试:")
test_comments = [
    "这个产品真的很棒，质量很好！",
    "太差了，完全不好用，后悔购买。",
    "一般般，没什么特别的感觉。",
    "物流很快，商品包装完好，非常满意！",
    "质量太差，用了一次就坏了，不推荐购买。"
]


def _preprocess_comment(comment):
    """Apply the same cleaning used on the training data: strip
    punctuation, tokenise with jieba, drop stop words and
    single-character tokens, and re-join with spaces."""
    text_clean = re.sub(r'[^\w\s]', '', comment)
    words = [w for w in jieba.lcut(text_clean) if w not in stopwords and len(w) > 1]
    return ' '.join(words)


# Vectorise and predict all comments in ONE batch instead of calling
# model.predict() once per comment — identical results per row, far
# less per-call overhead.
batch_tfidf = tfidf_vectorizer.transform(
    [_preprocess_comment(c) for c in test_comments]
).toarray()
batch_sequences = batch_tfidf.reshape(len(test_comments), batch_tfidf.shape[1], 1)
predictions = model.predict(batch_sequences)

for comment, prediction in zip(test_comments, predictions):
    sentiment = int(np.argmax(prediction))   # 0 = negative, 1 = positive
    confidence = float(np.max(prediction))   # winning-class probability
    sentiment_text = "正面" if sentiment == 1 else "负面"
    print(f"评论: '{comment}'")
    print(f"情感: {sentiment_text}, 置信度: {confidence:.4f}")
    print("---")

# 11. Persist the model AND the fitted vectoriser.
# The vectoriser must be saved alongside the model: running inference on
# new text later requires the exact vocabulary and IDF weights fitted
# above. (The original code imported joblib and the section comment
# promised to save the vectoriser, but never did.)
model.save('sentiment_analysis_lstm_model.h5')
print("模型已保存为 'sentiment_analysis_lstm_model.h5'")
joblib.dump(tfidf_vectorizer, 'tfidf_vectorizer.pkl')
print("向量化器已保存为 'tfidf_vectorizer.pkl'")
