import torch
import torch.optim as optim
import torch.nn as nn
import jieba
from collections import Counter

# 示例数据：简单的中文情感文本
# Sample data: four short Chinese sentences with sentiment labels
texts = [
    "这部电影很好看，我很喜欢",
    "这部电影很糟糕，我不喜欢",
    "剧情很精彩，值得一看",
    "演员表演很糟糕，剧情也很差"
]
labels = [1, 0, 1, 0]  # 1 = positive sentiment, 0 = negative sentiment

def preprocess_text(texts):
    """Tokenize every text with jieba and return one flat token list.

    Duplicates are preserved (the caller counts frequencies), and tokens
    appear in corpus order.

    Args:
        texts: iterable of strings to segment.

    Returns:
        list[str]: all tokens from all texts, concatenated.
    """
    # Flat comprehension replaces the manual loop + extend.
    return [word for text in texts for word in jieba.cut(text)]

def build_vocab(all_words, vocab_size=1000):
    """Build a frequency-ranked vocabulary with a leading "<UNK>" token.

    Args:
        all_words: iterable of tokens (duplicates expected).
        vocab_size: total vocabulary size, including the "<UNK>" slot.

    Returns:
        tuple[list[str], dict[str, int]]: the vocabulary list (index 0 is
        "<UNK>") and the word -> index lookup table.
    """
    counts = Counter(all_words)
    top_words = [word for word, _ in counts.most_common(vocab_size - 1)]
    vocab = ["<UNK>"] + top_words
    word_to_idx = {word: idx for idx, word in enumerate(vocab)}
    return vocab, word_to_idx

def text_to_indices(texts, word_to_idx):
    """Convert each text into a list of vocabulary indices.

    Out-of-vocabulary words map to the "<UNK>" index.

    Args:
        texts: iterable of strings to segment with jieba.
        word_to_idx: word -> index mapping that must contain "<UNK>".

    Returns:
        list[list[int]]: one index list per input text (variable lengths).
    """
    # Hoist the <UNK> index and use dict.get to avoid the double lookup of
    # `word_to_idx[w] if w in word_to_idx else ...`.
    unk = word_to_idx["<UNK>"]
    return [
        [word_to_idx.get(word, unk) for word in jieba.cut(text)]
        for text in texts
    ]

# LSTM 模型
# LSTM model
class LSTMClassifier(nn.Module):
    """Single-layer LSTM sentiment classifier.

    Embeds token indices, feeds them through an LSTM, and maps the final
    time step's output to a sigmoid probability.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map (batch, seq_len) index tensors to (batch, output_dim) probabilities."""
        embedded = self.embedding(x)
        seq_out, _ = self.lstm(embedded)
        final_step = seq_out[:, -1, :]  # output at the last time step only
        logits = self.fc(final_step)
        return self.sigmoid(logits)

# 数据预处理
# Data preprocessing
all_words = preprocess_text(texts)  # tokenize the whole corpus; tokens may repeat
vocab, word_to_idx = build_vocab(all_words, vocab_size=100)  # keep the highest-frequency words and build the word->index map
indices = text_to_indices(texts, word_to_idx)  # convert each sentence into a list of vocabulary indices
max_len = max(len(idx) for idx in indices)  # length of the longest sentence in the corpus
padded_indices = [idx + [0] * (max_len - len(idx)) for idx in indices]  # right-pad shorter sentences with 0 (also the <UNK> index) up to max_len
labels = torch.tensor(labels, dtype=torch.float).unsqueeze(1)  # shape (N, 1) to match the model output for BCELoss

# 定义模型和训练参数
model = LSTMClassifier(vocab_size=len(vocab), embedding_dim=10, hidden_dim=16, output_dim=1)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
num_epochs = 100

# 训练模型
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    output = model(torch.tensor(padded_indices))
    loss = criterion(output, labels)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")

print("训练完成！")

# 测试新句子
# Score a new sentence with the trained model
def start(sentence, model, word_to_idx, max_len):
    """Return the model's positive-sentiment probability for one sentence.

    Args:
        sentence: raw Chinese text to classify.
        model: trained classifier returning a (1, 1) probability tensor.
        word_to_idx: word -> index mapping that must contain "<UNK>".
        max_len: padding length used during training.

    Returns:
        float: predicted probability of positive sentiment.
    """
    model.eval()  # switch to evaluation mode (e.g. disables dropout if any)
    # Hoist the <UNK> index and use dict.get instead of the double-lookup
    # conditional expression.
    unk = word_to_idx["<UNK>"]
    indices = [word_to_idx.get(word, unk) for word in jieba.cut(sentence)]
    # Right-pad with 0 (also the <UNK> index). Sentences longer than max_len
    # are passed through unpadded: [0] * negative == [].
    padded_indices = indices + [0] * (max_len - len(indices))
    batch = torch.tensor([padded_indices], dtype=torch.long)  # batch of size 1
    with torch.no_grad():
        output = model(batch)
        prediction = output.item()
    return prediction

# 测试句子
# Quick sanity check on an unseen sentence
test_sentence = "很好看"
prediction = start(test_sentence, model, word_to_idx, max_len)
print(f"测试句子: {test_sentence}")
print(f"预测概率: {prediction:.4f}")
print(f"情感预测: {'正面' if prediction > 0.5 else '负面'}")  # threshold at 0.5
