# 循环神经网络
#输入层 隐藏层 输出层




import numpy as np
import pandas as pd
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping

# Method 1: download the NLTK data from a domestic (China) mirror.
# BUG FIX: nltk.download() accepts no `url` keyword, so the original calls
# raised TypeError on every run and the bare `except:` silently swallowed
# it — the mirror was never actually used. The supported way to change the
# download server is a Downloader configured with the mirror's index URL.
nltk.data.path.append('./nltk_data')  # search the local dir regardless of download outcome
try:
    # NOTE(review): server_index_url must point at the mirror's index.xml —
    # confirm the exact index path on the Tsinghua mirror.
    _mirror = nltk.downloader.Downloader(
        server_index_url='https://mirrors.tuna.tsinghua.edu.cn/nltk/nltk_data/index.xml'
    )
    _mirror.download('stopwords', quiet=True, download_dir='./nltk_data')
    _mirror.download('wordnet', quiet=True, download_dir='./nltk_data')
except Exception:
    # Narrowed from a bare `except:` (which also caught KeyboardInterrupt/SystemExit).
    print("使用镜像源下载失败，尝试其他方法...")

# 方法2: 手动下载资源并放置到正确位置
# 如果你已经手动下载了资源，可以取消下面的注释
# nltk.data.path.append('path/to/your/nltk_data')  # 替换为你的nltk_data目录路径

# Method 3: verify the NLTK corpora are usable; otherwise fall back to a
# built-in stopword list and a trivial lemmatizer.
try:
    nltk.data.find('corpora/stopwords')
    nltk.data.find('corpora/wordnet')
    print("NLTK资源已成功加载")
except LookupError:
    print("无法加载NLTK资源，使用备用方案...")

    # Fallback English stopword list (mirrors the usual NLTK set).
    STOPWORDS = {
        'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours',
        'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
        'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
        'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be',
        'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
        'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for',
        'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above',
        'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
        'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
        'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only',
        'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should',
        'now',
    }

    class SimpleLemmatizer:
        """Fallback lemmatizer: naive plural handling only."""

        def lemmatize(self, word):
            # Strip a single trailing 's'; everything else passes through.
            return word[:-1] if word.endswith('s') else word

    lemmatizer = SimpleLemmatizer()
else:
    # NLTK resources are available: use the real stopwords and WordNet lemmatizer.
    STOPWORDS = set(stopwords.words('english'))
    lemmatizer = WordNetLemmatizer()



# Data preparation: a tiny labelled movie-review corpus (1 = positive, 0 = negative).
_reviews = [
    "This movie was terrible. I hated it.",
    "The film was amazing! Great acting and story.",
    "Waste of time. Don't watch this movie.",
    "Fantastic experience. Loved every minute.",
    "Not recommended. Boring plot and bad dialogue.",
    "Best movie I've seen this year. Highly recommend!",
]
_labels = [0, 1, 0, 1, 0, 1]
sample_data = [[text, label] for text, label in zip(_reviews, _labels)]

# Assemble the working DataFrame.
df = pd.DataFrame(sample_data, columns=['review', 'sentiment'])


# Text-cleaning step shared by training and inference.
def preprocess_text(text):
    """Clean one review string for modeling.

    Lowercases, replaces every non-letter with a space, drops stopwords,
    lemmatizes the surviving words, and re-joins them with single spaces.

    Args:
        text: raw review string.

    Returns:
        The cleaned, space-joined string (may be empty if every word was
        a stopword).
    """
    # Lowercase first so matching against the all-lowercase STOPWORDS works.
    text = text.lower()
    # Keep letters only; digits and punctuation become spaces.
    text = re.sub(r'[^a-zA-Z]', ' ', text)
    # Filter stopwords (idiomatic `not in`), then lemmatize what survives —
    # same order as the original two-pass version.
    words = [lemmatizer.lemmatize(w) for w in text.split() if w not in STOPWORDS]
    return ' '.join(words)


# Apply the cleaning step to every review.
df['clean_review'] = df['review'].apply(preprocess_text)

# Hold out 20% for testing (2 of the 6 samples at this size).
X_train, X_test, y_train, y_test = train_test_split(
    df['clean_review'], df['sentiment'], test_size=0.2, random_state=42
)

# Text-vectorization hyper-parameters.
max_words = 10000  # vocabulary size cap for the tokenizer
max_len = 200  # padded sequence length (far longer than these short reviews)

# Fit the tokenizer on training text only, so the test set never leaks
# into the vocabulary.
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(X_train)

# Words absent from the fitted vocabulary are dropped by texts_to_sequences.
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_test_seq = tokenizer.texts_to_sequences(X_test)

# Pad/truncate every sequence to exactly max_len tokens
# (presumably pre-padded, the Keras default — verify if position matters).
X_train_pad = pad_sequences(X_train_seq, maxlen=max_len)
X_test_pad = pad_sequences(X_test_seq, maxlen=max_len)

# Build the LSTM binary classifier, layer by layer.
embedding_dim = 128

model = Sequential()
# Token ids -> dense embedding vectors.
model.add(Embedding(max_words, embedding_dim, input_length=max_len))
# Only the final hidden state feeds the classifier head.
model.add(LSTM(128, return_sequences=False))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
# Sigmoid output: probability that the review is positive.
model.add(Dense(1, activation='sigmoid'))

model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# Early stopping: halt when validation loss stops improving for 3 epochs
# and roll back to the best weights seen so far.
early_stopping = EarlyStopping(
    monitor='val_loss',
    patience=3,
    restore_best_weights=True
)

# Train the model.
# NOTE(review): with only ~4 training rows, validation_split=0.2 leaves a
# one-sample validation set — val_loss will be extremely noisy, so the
# early-stopping signal is unreliable at this data size.
history = model.fit(
    X_train_pad, y_train,
    epochs=10,
    batch_size=32,
    validation_split=0.2,
    callbacks=[early_stopping]
)

# Evaluate on the held-out test set.
loss, accuracy = model.evaluate(X_test_pad, y_test)
print(f'测试集准确率: {accuracy:.4f}')


# Inference helper for new, raw review text.
def predict_sentiment(review):
    """Score one raw review with the trained model.

    Applies the same cleaning/tokenization/padding as training, then
    returns a (label, score) tuple where label is "正面"/"负面" and
    score is the sigmoid output in [0, 1].
    """
    cleaned = preprocess_text(review)
    padded = pad_sequences(tokenizer.texts_to_sequences([cleaned]), maxlen=max_len)
    score = model.predict(padded)[0][0]
    # 0.5 is the positive/negative decision threshold.
    label = "正面" if score >= 0.5 else "负面"
    return label, score



# Smoke-test the full pipeline on a fresh review.
test_review = "This movie was fantastic! I really enjoyed it."
sentiment, score = predict_sentiment(test_review)
print(f"评论情感: {sentiment}, 置信度: {score:.4f}")

# Persist the trained model.
# NOTE(review): .h5 is the legacy Keras/HDF5 format; newer Keras prefers
# the native .keras format — confirm which format downstream consumers expect.
model.save('sentiment_analysis_model.h5')
