import torch
import torch.nn as nn
import torch.optim as optim
import random

# 1. Dataset: tiny hard-coded question/answer pairs
pairs = [
    ['hello', 'hi'],
    ['how are you', 'i am fine'],
    ['what is your name', 'i am a chatbot'],
    ['bye', 'goodbye'],
]

# 2. Vocabulary: special tokens first, then every training word, sorted
words = sorted({w for q, a in pairs for w in q.split() + a.split()})
words = ['<pad>', '<sos>', '<eos>'] + words
idx2word = dict(enumerate(words))
word2idx = {w: i for i, w in idx2word.items()}

def encode(sentence):
    """Convert a sentence to token ids, wrapped in <sos>/<eos>.

    Words missing from the vocabulary are silently skipped, so unseen
    user input at chat time no longer raises KeyError.
    """
    ids = [word2idx[w] for w in sentence.split() if w in word2idx]
    return [word2idx['<sos>']] + ids + [word2idx['<eos>']]

def pad(seq, max_len):
    """Right-pad `seq` with <pad> ids up to `max_len` (no-op if already long enough)."""
    padding = [word2idx['<pad>']] * (max_len - len(seq))
    return seq + padding

# 3. Build padded training tensors (one row per pair)
encoded_pairs = [(encode(q), encode(a)) for q, a in pairs]
max_len = max(len(seq) for pair in encoded_pairs for seq in pair)
X = torch.tensor([pad(q, max_len) for q, _ in encoded_pairs])
Y = torch.tensor([pad(a, max_len) for _, a in encoded_pairs])

# 4. Model definitions
class Encoder(nn.Module):
    """Embeds a source token sequence and returns the GRU's final hidden state."""

    def __init__(self, vocab_size, emb_dim, hidden_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.rnn = nn.GRU(emb_dim, hidden_dim, batch_first=True)

    def forward(self, src):
        # src: [B, T] token ids -> hidden: [1, B, hidden_dim]
        _, hidden = self.rnn(self.embedding(src))
        return hidden

class Decoder(nn.Module):
    """Predicts the next-token distribution from one input token and a hidden state."""

    def __init__(self, vocab_size, emb_dim, hidden_dim):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.rnn = nn.GRU(emb_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input, hidden):
        # input: [B] -> [B, 1] so the GRU sees a length-1 sequence
        embedded = self.embedding(input.unsqueeze(1))
        output, hidden = self.rnn(embedded, hidden)
        logits = self.fc(output.squeeze(1))  # [B, vocab_size]
        return logits, hidden

# 5. Instantiate models, loss, and one joint optimizer over both modules
vocab_size = len(words)
encoder = Encoder(vocab_size, emb_dim=32, hidden_dim=64)
decoder = Decoder(vocab_size, emb_dim=32, hidden_dim=64)

# <pad> targets contribute nothing to the loss
criterion = nn.CrossEntropyLoss(ignore_index=word2idx['<pad>'])
all_params = list(encoder.parameters()) + list(decoder.parameters())
optimizer = optim.Adam(all_params, lr=1e-3)

# 6. Training: teacher forcing, one (question, answer) pair per step.
# Explicitly switch to train mode, since chat() flips the modules to eval().
encoder.train()
decoder.train()
for epoch in range(300):
    total_loss = 0
    for i in range(X.size(0)):
        src, tgt = X[i], Y[i]
        # The optimizer owns both modules' parameters, so one
        # zero_grad() replaces the two per-module calls.
        optimizer.zero_grad()
        hidden = encoder(src.unsqueeze(0))
        input_token = tgt[0].unsqueeze(0)  # start from <sos>
        loss = 0

        # Teacher forcing: always feed the ground-truth token
        for t in range(1, tgt.size(0)):
            output, hidden = decoder(input_token, hidden)
            loss += criterion(output, tgt[t].unsqueeze(0))
            input_token = tgt[t].unsqueeze(0)

        loss.backward()
        optimizer.step()
        total_loss += loss.item()

    if epoch % 50 == 0:
        print(f"Epoch {epoch}, Loss: {total_loss:.4f}")

# 7. Chat function
def chat(sentence):
    """Greedy-decode a reply for `sentence`, up to max_len tokens.

    Words outside the training vocabulary are dropped before encoding,
    so unseen user input no longer raises KeyError.
    """
    encoder.eval()
    decoder.eval()
    with torch.no_grad():
        # Keep only in-vocabulary words; OOV words would crash encode()
        known = ' '.join(w for w in sentence.split() if w in word2idx)
        tokens = torch.tensor(pad(encode(known), max_len))
        hidden = encoder(tokens.unsqueeze(0))
        input_token = torch.tensor([word2idx['<sos>']])
        output_words = []
        for _ in range(max_len):
            output, hidden = decoder(input_token, hidden)
            top1 = output.argmax(1).item()
            if top1 == word2idx['<eos>']:
                break
            output_words.append(idx2word[top1])
            input_token = torch.tensor([top1])
        return ' '.join(output_words)

# 8. Interactive demo loop; type "quit" to exit
while True:
    user_input = input("You: ")
    if user_input == "quit":
        break
    print("Bot:", chat(user_input.lower()))
