import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data import Field, LabelField, BucketIterator
from nltk.tokenize import word_tokenize
import nltk

import jieba

# Create Field objects to process the text and labels
def jieba_tokenize(text):
    """Segment `text` with jieba and return the tokens as a list."""
    return list(jieba.cut(text))

# Fields define how raw text / labels are tokenized and numericalized.
# Bug fix: batch_first=True so batches come out as (batch, seq). The model's
# LSTM and pack_padded_sequence are built with batch_first=True, and the
# legacy Field default (seq-first) silently disagrees with them.
text_field = Field(tokenize=jieba_tokenize, lower=True,
                   include_lengths=True, batch_first=True)

label_field = LabelField(dtype=torch.float)

# Load the IMDB (aclImdb) dataset splits.
train_data, test_data = IMDB.splits(text_field, label_field)

# Build vocabularies, attaching pretrained GloVe vectors to the text vocab.
# NOTE(review): glove.6B.100d is English; jieba on the English IMDB text
# mostly falls back to character/whitespace splitting — confirm the
# tokenizer choice is intentional (nltk.word_tokenize is imported but unused).
text_field.build_vocab(train_data, max_size=10000, vectors="glove.6B.100d")
label_field.build_vocab(train_data)

# Bucketed iterators group similarly-sized examples to minimize padding.
# sort_within_batch=True is required because pack_padded_sequence (used in
# the model) expects lengths sorted in decreasing order.
train_iterator, test_iterator = BucketIterator.splits(
    (train_data, test_data),
    batch_size=64,
    sort_within_batch=True,
    sort_key=lambda x: len(x.text),
    device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))

# Define the model
class TextClassifier(nn.Module):
    """LSTM-based text classifier producing raw logits.

    Embeds token ids, runs a multi-layer (optionally bidirectional) LSTM
    over a packed padded sequence, and classifies from the top layer's
    final hidden state(s).

    Args:
        vocab_size: number of embedding rows.
        embedding_dim: embedding width.
        hidden_dim: LSTM hidden size per direction.
        output_dim: number of output logits.
        n_layers: number of stacked LSTM layers.
        bidirectional: whether the LSTM runs in both directions.
        dropout: dropout probability (embeddings, between LSTM layers,
            and before the final linear layer).
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
                 n_layers, bidirectional, dropout):
        super().__init__()
        self.bidirectional = bidirectional
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # Inter-layer LSTM dropout only applies when n_layers > 1; passing
        # 0.0 for a single layer avoids PyTorch's warning.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers,
                            bidirectional=bidirectional,
                            dropout=dropout if n_layers > 1 else 0.0,
                            batch_first=True)
        # Bug fix: the classifier input is 2*hidden_dim only when the LSTM
        # is bidirectional; the original hard-coded hidden_dim*2, which is
        # wrong (and crashes for n_layers=1) when bidirectional=False.
        fc_in = hidden_dim * 2 if bidirectional else hidden_dim
        self.fc = nn.Linear(fc_in, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text, text_lengths):
        """Classify a padded batch.

        Args:
            text: (batch, seq) int64 token ids.
            text_lengths: (batch,) true lengths, on CPU, sorted descending
                (pack_padded_sequence default enforce_sorted=True).

        Returns:
            (batch, output_dim) raw logits.
        """
        embedded = self.dropout(self.embedding(text))
        packed = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,
                                                   batch_first=True)
        _, (hidden, _) = self.lstm(packed)
        if self.bidirectional:
            # hidden[-2] / hidden[-1] are the top layer's forward/backward
            # final states.
            final = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        else:
            final = hidden[-1, :, :]
        return self.fc(self.dropout(final))

# ---- Hyper-parameters -------------------------------------------------
vocab_size = len(text_field.vocab)
embedding_dim = 100  # must match the glove.6B.100d vectors loaded above
hidden_dim = 256
output_dim = 1       # single logit for binary sentiment (BCEWithLogitsLoss)
n_layers = 2
bidirectional = True
dropout = 0.5

# ---- Model, optimizer, loss -------------------------------------------
model = TextClassifier(vocab_size, embedding_dim, hidden_dim, output_dim,
                       n_layers, bidirectional, dropout)

# Bug fix: the GloVe vectors attached to the vocab by build_vocab were
# never copied into the model, so the pretrained embeddings went unused
# and the embedding layer stayed randomly initialized.
model.embedding.weight.data.copy_(text_field.vocab.vectors)

optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()

# Compute the target device once (the original duplicated this expression)
# and move the model and loss there if a GPU is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
criterion = criterion.to(device)

# Train the model
def train(model, iterator, optimizer, criterion):
    """Run one epoch of optimization over `iterator`.

    Each batch's `text` attribute is a (token_ids, lengths) pair because
    the text field was built with include_lengths=True.
    """
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        # Bug fix: batch.text is a (tensor, lengths) tuple; the original
        # passed the whole tuple to the model as `text`.
        text, text_lengths = batch.text
        # pack_padded_sequence requires the lengths tensor on the CPU.
        text_lengths = text_lengths.to(torch.device('cpu'))
        predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, batch.label)
        loss.backward()
        optimizer.step()

# Evaluate the model
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss over `iterator` (no gradient updates).

    Returns 0.0 for an empty iterator instead of dividing by zero.
    """
    model.eval()
    epoch_loss = 0.0
    with torch.no_grad():
        for batch in iterator:
            # Bug fix: batch.text is a (tensor, lengths) tuple
            # (include_lengths=True); unpack instead of passing the tuple.
            text, text_lengths = batch.text
            # pack_padded_sequence requires the lengths tensor on the CPU.
            text_lengths = text_lengths.to(torch.device('cpu'))
            predictions = model(text, text_lengths).squeeze(1)
            loss = criterion(predictions, batch.label)
            epoch_loss += loss.item()
    n_batches = len(iterator)
    return epoch_loss / n_batches if n_batches else 0.0

# Run the full train/evaluate cycle.
num_epochs = 1

for epoch in range(num_epochs):
    # One optimization pass, then measure the loss on both splits.
    train(model, train_iterator, optimizer, criterion)
    losses = [evaluate(model, it, criterion)
              for it in (train_iterator, test_iterator)]
    train_loss, test_loss = losses
    print(f'Epoch: {epoch+1}\tTrain Loss: {train_loss:.4f}\tTest Loss: {test_loss:.4f}')
