import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Field, BucketIterator, TabularDataset
from torch.nn import TransformerDecoder, TransformerDecoderLayer

# Data preparation: tokenized question/answer fields with <sos>/<eos> markers.
QUESTION = Field(tokenize="spacy", lower=True, init_token='<sos>', eos_token='<eos>')
ANSWER = Field(tokenize="spacy", lower=True, init_token='<sos>', eos_token='<eos>')

fields = [('question', QUESTION), ('answer', ANSWER)]
train_data, test_data = TabularDataset.splits(
    path='data',
    train='train_data.csv',
    test='test_data.csv',
    format='csv',
    fields=fields
)

# Bug fix: the vocabularies were never built, so any later len(FIELD.vocab)
# access raised AttributeError. Build both from the training split only to
# avoid leaking test tokens into the vocabulary.
QUESTION.build_vocab(train_data)
ANSWER.build_vocab(train_data)

# Model: a Transformer decoder head mapping target-token ids to vocab logits.
class QADecoder(nn.Module):
    """Transformer-decoder head for autoregressive answer generation.

    Args:
        vocab_size: size of the target vocabulary (embedding rows and
            output logit dimension).
        embedding_dim: token embedding size; must be divisible by the
            8 attention heads.
        hidden_dim: feed-forward width inside each decoder layer.
        num_layers: number of stacked decoder layers.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers):
        super(QADecoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # Bug fix: hidden_dim was accepted but silently ignored; wire it in
        # as the decoder layer's feed-forward dimension.
        self.decoder = TransformerDecoder(
            TransformerDecoderLayer(embedding_dim, nhead=8, dim_feedforward=hidden_dim),
            num_layers,
        )
        self.fc = nn.Linear(embedding_dim, vocab_size)

    def forward(self, trg, memory, trg_mask=None):
        """Decode ``trg`` attending over ``memory``.

        Args:
            trg: LongTensor of token ids, shape (trg_len, batch).
            memory: FloatTensor, shape (src_len, batch, embedding_dim).
            trg_mask: optional additive (trg_len, trg_len) mask. When omitted
                a standard causal mask is generated so position i cannot
                attend to positions > i (backward-compatible default; the
                original required callers to always pass one).

        Returns:
            FloatTensor of logits, shape (trg_len, batch, vocab_size).
        """
        embedded = self.embedding(trg)
        if trg_mask is None:
            trg_mask = nn.Transformer.generate_square_subsequent_mask(
                trg.size(0)
            ).to(embedded.device)
        output = self.decoder(embedded, memory, tgt_mask=trg_mask)
        return self.fc(output)

# Hyperparameters
# Bug fix: the decoder embeds and predicts *answer* tokens, so its vocabulary
# size must come from the ANSWER field, not QUESTION.
VOCAB_SIZE = len(ANSWER.vocab)
EMBEDDING_DIM = 256   # must be divisible by the 8 attention heads
HIDDEN_DIM = 512      # decoder feed-forward width
NUM_LAYERS = 2
LEARNING_RATE = 0.001
N_EPOCHS = 10

# Model, optimizer, and loss
model = QADecoder(VOCAB_SIZE, EMBEDDING_DIM, HIDDEN_DIM, NUM_LAYERS)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
# Ignore padding positions so <pad> tokens contribute neither loss nor gradient.
criterion = nn.CrossEntropyLoss(ignore_index=ANSWER.vocab.stoi[ANSWER.pad_token])

# Training
# Bug fixes vs. the original:
#  - train_iterator was never created (BucketIterator was imported but unused);
#  - model(question, answer) did not match forward(trg, memory, trg_mask);
#  - the target was not shifted for teacher forcing, so the model was trained
#    to copy its input instead of predicting the next token.
train_iterator, test_iterator = BucketIterator.splits(
    (train_data, test_data),
    batch_size=32,
    sort=False,
)

for epoch in range(N_EPOCHS):
    model.train()
    epoch_loss = 0.0
    for batch in train_iterator:
        question = batch.question   # (src_len, batch) token ids
        answer = batch.answer       # (trg_len, batch) token ids

        optimizer.zero_grad()

        # Teacher forcing: feed answer[:-1], predict answer[1:].
        trg_input = answer[:-1]
        targets = answer[1:]

        # There is no separate encoder, so use the embedded question as the
        # decoder memory.
        memory = model.embedding(question)

        # Causal mask: position i may not attend to positions > i.
        trg_mask = nn.Transformer.generate_square_subsequent_mask(trg_input.size(0))

        output = model(trg_input, memory, trg_mask)
        output_dim = output.shape[-1]

        # reshape (not view): the transformer output may be non-contiguous.
        loss = criterion(output.reshape(-1, output_dim), targets.reshape(-1))
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()

    print(f'Epoch {epoch + 1}: train loss {epoch_loss / max(len(train_iterator), 1):.3f}')

# Evaluation
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss of ``model`` over ``iterator``.

    Mirrors the training step: teacher-forced decoding of ``answer[:-1]``
    against the embedded question, scored against ``answer[1:]``.

    Args:
        model: module exposing ``embedding`` and ``forward(trg, memory, trg_mask)``.
        iterator: iterable of batches with ``.question`` / ``.answer``
            LongTensors shaped (seq_len, batch); must support ``len()``.
        criterion: loss taking (N, vocab) logits and (N,) targets.
    """
    model.eval()
    epoch_loss = 0.0

    with torch.no_grad():
        for batch in iterator:
            question = batch.question
            answer = batch.answer

            # Bug fix: the original called model(question, answer), which
            # neither matches forward(trg, memory, trg_mask) nor shifts the
            # target for next-token prediction.
            trg_input = answer[:-1]
            targets = answer[1:]
            memory = model.embedding(question)
            trg_mask = nn.Transformer.generate_square_subsequent_mask(trg_input.size(0))

            output = model(trg_input, memory, trg_mask)
            output_dim = output.shape[-1]

            # reshape (not view): output may be non-contiguous.
            loss = criterion(output.reshape(-1, output_dim), targets.reshape(-1))
            epoch_loss += loss.item()

    # len(iterator) = number of batches; guard against an empty iterator.
    return epoch_loss / max(len(iterator), 1)

# Report held-out performance once training has finished.
test_loss = evaluate(model, test_iterator, criterion)
print('Test Loss: {:.3f}'.format(test_loss))
