import torch
import torch.nn as nn
import ch_en_DataLoader as data_loader
import matplotlib.pyplot as plt
import time
from torch import Tensor
from typing import Tuple
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

class Seq2SeqEncoder(nn.Module):
    """GRU encoder: embeds token ids and returns per-step outputs plus the
    final hidden state of every layer."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0.0, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout)

    def forward(self, X):
        # (batch, steps) ids -> (steps, batch, embed): the GRU is time-major
        # by default, so move the step axis to the front before running it.
        embedded = self.embedding(X).permute(1, 0, 2)
        return self.rnn(embedded)


class Seq2SeqDecoder(nn.Module):
    """GRU decoder that conditions every time step on the encoder's final
    top-layer hidden state (concatenated to each input embedding)."""

    def __init__(self, vocab_size: int, embed_size: int, num_hiddens: int, num_layers: int, dropout: float = 0.0, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Each step consumes its embedding plus the fixed context vector.
        self.rnn = nn.GRU(num_hiddens + embed_size, num_hiddens, num_layers, dropout=dropout)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs: Tuple[Tensor, Tensor]):
        # Encoder yields (outputs, hidden); only the hidden state seeds us.
        _, hidden = enc_outputs
        return hidden

    def forward(self, X, state: Tensor) -> Tuple[Tensor, Tensor]:
        # (batch, steps) -> (steps, batch, embed), time-major for the GRU.
        embedded = self.embedding(X).permute(1, 0, 2)
        num_steps = embedded.shape[0]
        # Tile the top layer's hidden state across every time step.
        context = state[-1].repeat(num_steps, 1, 1)
        rnn_out, state = self.rnn(torch.cat((embedded, context), 2), state)
        # Project to vocab scores and restore (batch, steps, vocab) order.
        return self.dense(rnn_out).permute(1, 0, 2), state


class EncoderDecoder(nn.Module):
    """Glue module: runs the encoder, seeds the decoder's state from its
    output, then runs the decoder on the (teacher-forced) decoder input."""

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_input, dec_input):
        encoded = self.encoder(enc_input)
        initial_state = self.decoder.init_state(encoded)
        return self.decoder(dec_input, initial_state)


def sequence_mask(X, valid_len, value=0):
    """Overwrite positions past each row's valid length with `value`.

    X is (batch, steps); valid_len is (batch,). Mutates X in place and
    returns it.
    """
    num_steps = X.shape[1]
    positions = torch.arange(num_steps, dtype=torch.float32, device=X.device)
    # (1, steps) < (batch, 1) broadcasts to a (batch, steps) keep-mask.
    keep = positions[None, :] < valid_len.reshape(-1, 1)
    X[~keep] = value
    return X


class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    """Softmax cross-entropy that zeroes out padded target positions.

    pred: (batch, steps, vocab) unnormalised scores.
    label: (batch, steps) target token ids.
    valid_len: (batch,) count of real (non-pad) tokens per sequence.
    Returns a (batch,) tensor: per-sequence loss averaged over all steps,
    with padded steps contributing zero.
    """

    def forward(self, pred, label, valid_len):
        # 1 for real tokens, 0 for padding.
        mask = sequence_mask(torch.ones_like(label), valid_len)
        # Keep per-position losses; CrossEntropyLoss wants (batch, classes, steps).
        self.reduction = 'none'
        per_token = super().forward(pred.permute(0, 2, 1), label)
        # Mask the padded positions, then average over the step dimension.
        return (per_token * mask).mean(dim=1)


def grad_clipping(net, theta):
    """Rescale gradients in place so their global L2 norm is at most theta."""
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        # Non-Module nets are expected to expose a `params` attribute.
        params = net.params
    total = torch.sqrt(sum((p.grad ** 2).sum() for p in params))
    if total > theta:
        scale = theta / total
        for p in params:
            p.grad[:] *= scale


class Timer:
    """Records wall-clock durations between start() and stop() calls."""

    def __init__(self):
        self.times = []
        self.start()  # begin timing immediately on construction

    def start(self):
        """Begin (or restart) the current timing interval."""
        self.tik = time.time()

    def stop(self):
        """Append the elapsed seconds since the last start() and return it."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed


class Accumulator:
    """Maintains n running float sums, indexable like a list."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        # Pair each running total with its increment; extra args are ignored
        # by zip, and increments are coerced to float (works for 0-d tensors).
        updated = []
        for total, inc in zip(self.data, args):
            updated.append(total + float(inc))
        self.data = updated

    def __getitem__(self, idx):
        return self.data[idx]


def train_seq2seq(net, data_loader, lr, num_epochs, tar_vocab, device, batch_size, source, target):
    """Train an encoder-decoder translation model with teacher forcing.

    Prints batch/epoch losses and periodic BLEU estimates, saves a
    TorchScript checkpoint ("ch_en_model.pth") after every epoch, and shows
    a matplotlib loss-per-epoch curve when training finishes.

    Args:
        net: EncoderDecoder module; moved onto `device` here.
        data_loader: module exposing `load_data(batch_size, source, target, ...)`.
        lr: initial Adam learning rate (annealed by a cosine scheduler).
        num_epochs: number of full passes over the data.
        tar_vocab: target vocabulary; supplies the '<bos>' id and id->token decoding.
        device: torch.device for the model and batches.
        batch_size: minibatch size handed to the loader.
        source, target: tokenised corpora forwarded to the loader.
    """
    x = [i for i in range(num_epochs)]  # epoch indices (x-axis of the loss plot)
    y = []  # mean loss per epoch (y-axis of the loss plot)

    def xavier_init_weight(m):
        # Xavier-uniform init for Linear weights and every GRU weight matrix
        # (biases are left at their defaults).
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
        if type(m) == nn.GRU:
            for param in m._flat_weights_names:
                if "weight" in param:
                    nn.init.xavier_uniform_(m._parameters[param])

    net.apply(xavier_init_weight)
    net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    # Cosine annealing with a fixed 50-epoch period, independent of num_epochs.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
    loss = MaskedSoftmaxCELoss()
    net.train()
    for epoch in range(num_epochs):
        # Loader is rebuilt every epoch; worker count is hard-coded to 13.
        data_iter = data_loader.load_data(batch_size, source, target, num_workers=13, pin_memory=True)
        timer = Timer()
        metric = Accumulator(2)  # [summed loss, token count] for this epoch
        batch_count = 0
        bleu_scores = []
        smoothie = SmoothingFunction()
        for batch in data_iter:
            optimizer.zero_grad()
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]

            # Teacher forcing: decoder input is <bos> followed by the target
            # sequence shifted right by one position.
            bos = torch.tensor([tar_vocab['<bos>']] * Y.shape[0], device=device).reshape(-1, 1)
            dec_input = torch.cat([bos, Y[:, :-1]], 1)

            Y_hat, _ = net(X, dec_input)
            l = loss(Y_hat, Y, Y_valid_len)  # per-sequence masked loss, shape (batch,)
            l.sum().backward()
            grad_clipping(net, 1)  # cap the global gradient norm at 1
            optimizer.step()
            num_token = Y_valid_len.sum()
            with torch.no_grad():
                metric.add(l.sum(), num_token)
            batch_count += 1
            batch_loss = l.sum() / num_token
            if batch_count % 500 == 0:
                # NOTE(review): this prints the initial `lr`, not the
                # scheduler's current rate — check optimizer.param_groups[0]['lr'].
                print(f'Epoch {epoch + 1}, Batch {batch_count}, Loss: {batch_loss.item():.3f}, lr:{lr}')

            if batch_count % 500 == 0:
                net.eval()
                bleu_total = 0
                for i in range(min(10, Y.shape[0])):  # BLEU over up to the first 10 samples
                    # NOTE(review): `source[i]` indexes the full corpus, so this
                    # sentence may not correspond to batch row Y[i] — verify.
                    src_sentence = ' '.join([source[i][j] for j in range(len(source[i])) if source[i][j] != '<pad>'])
                    ref = [tar_vocab.to_tokens(Y[i].tolist())]
                    # NOTE(review): relies on module-level `en_vocab`/`ch_vocab`
                    # globals rather than this function's parameters — only
                    # works when called from this file's __main__ block.
                    translation = predict(net, 6, src_sentence, en_vocab, ch_vocab, device)
                    bleu = sentence_bleu(ref, translation.split(), smoothing_function=smoothie.method1)
                    bleu_total += bleu
                avg_bleu = bleu_total / min(10, Y.shape[0])
                bleu_scores.append(avg_bleu)
                print(f'Epoch {epoch + 1}, Batch {batch_count}, Average BLEU: {avg_bleu:.3f}')
                net.train()

        epoch_loss = metric[0] / metric[1]
        y.append(epoch_loss)
        print(f'Epoch {epoch + 1}, Epoch Loss: {epoch_loss:.3f}')

        # Checkpoint a TorchScript export of the model after every epoch.
        scripted_net = torch.jit.script(net)
        scripted_net.save("ch_en_model.pth")
        scheduler.step()

    plt.plot(x, y)
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.show()
    # NOTE(review): `timer` here is the last epoch's timer, stopped only now
    # (after plt.show()), so the tokens/sec figure covers the final epoch
    # plus the time the plot window was open.
    print(f'Final loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} '
          f'tokens/sec on {str(device)}')


def predict(net, num_steps, src_sentence, src_vocab, tar_vocab, device, save_attention_weights=False):
    """Greedy-decode a translation for one source sentence.

    Lower-cases and whitespace-tokenises `src_sentence`, encodes it, then
    feeds the decoder its own argmax prediction step by step until '<eos>'
    or `num_steps` tokens. Returns the space-joined target tokens.
    """
    net.eval()
    # Source words -> ids, terminated with <eos>, padded/truncated to num_steps.
    token_ids = src_vocab[src_sentence.lower().split(' ')] + [src_vocab['<eos>']]
    token_ids = data_loader.truncate_pad(token_ids, num_steps, src_vocab['<pad>'])
    enc_X = torch.tensor(token_ids, dtype=torch.long, device=device).unsqueeze(0)
    # Seed the decoder state from the encoder, start decoding from <bos>.
    state = net.decoder.init_state(net.encoder(enc_X))
    dec_X = torch.tensor([tar_vocab['<bos>']], dtype=torch.long, device=device).unsqueeze(0)
    predicted_ids = []
    for _ in range(num_steps):
        Y, state = net.decoder(dec_X, state)
        # Greedy choice becomes the next decoder input.
        dec_X = Y.argmax(dim=2)
        next_id = dec_X.squeeze(0).type(torch.int32).item()
        if next_id == tar_vocab['<eos>']:
            break
        predicted_ids.append(next_id)
    return ' '.join(tar_vocab.to_tokens(predicted_ids))


if __name__ == "__main__":
    # Pick a device: probe CUDA device 0 with a tiny allocation and fall back
    # to CPU if the probe fails (driver mismatch, OOM, etc.).
    if torch.cuda.is_available():
        try:
            # Explicit raise instead of `assert`: asserts vanish under -O.
            if torch.cuda.device_count() == 0:
                raise RuntimeError("No CUDA devices available.")
            device = torch.device("cuda:0")
            torch.tensor([1.0], device=device)  # smoke-test allocation
            print(f"Using GPU device 0: {torch.cuda.get_device_name(device)}")
        except Exception as e:
            print(f"CUDA device 0 unavailable: {e}, falling back to CPU.")
            device = torch.device("cpu")
    else:
        device = torch.device("cpu")
    print(f"Using device: {device}")

    # Hyper-parameters.
    embed_size, num_hiddens, num_layers, dropout = 256, 128, 2, 0.1
    batch_size = 128
    lr, num_epochs = 0.002, 100

    # Load tokenised corpora and build vocabularies (English -> Chinese).
    en_tokensized, ch_tokensized = data_loader.getdata('en_tokenized.json', 'ch_tokenized.json')
    source, target, en_vocab, ch_vocab = data_loader.load_vocab(en_tokensized, ch_tokensized)
    encoder = Seq2SeqEncoder(len(en_vocab), embed_size, num_hiddens, num_layers, dropout)
    decoder = Seq2SeqDecoder(len(ch_vocab), embed_size, num_hiddens, num_layers, dropout)
    net = EncoderDecoder(encoder, decoder)
    train_seq2seq(net, data_loader, lr, num_epochs, ch_vocab, device, batch_size, en_tokensized, ch_tokensized)

    # Export the trained model with TorchScript.
    scripted_net = torch.jit.script(net)
    scripted_net.save("ch_en_model.pth")

    # Sanity-check translations on a few short sentences.
    engs = ['go .', "i lost .", 'he\'s calm .', 'i\'m home .']
    num_steps = 6
    for eng in engs:
        translation = predict(net, num_steps, eng, en_vocab, ch_vocab, device)
        print(f'{eng} => {translation}')