import torch
from torch import nn
from d2l import torch as d2l
import math
from PCA_tools import draw_vocab_pca
from custom import *
from gru_data_load import dataset, train_iter, vocab
from cizhui_chuli_gru import TypeTable


class Classifier(nn.Module):
    """Text-classification head built on a pretrained RNN's embedding table.

    Loads the pretrained model from "pretrain_model.pth", reuses its
    embedding layer, and feeds the flattened, zero-padded sentence embedding
    through a small MLP emitting one logit per label.
    """

    def __init__(self, **kwargs):
        super(Classifier, self).__init__(**kwargs)
        # NOTE(review): torch.load without map_location assumes the checkpoint
        # was saved on a device available here — confirm.
        self.rnn_model = torch.load("pretrain_model.pth")
        self.embedding = self.rnn_model.embedding
        # Cache the padded sequence length so forward() always agrees with the
        # MLP input width (forward previously hard-coded 30 here — a latent
        # mismatch if dataset.max_length ever changes).
        self.max_length = dataset.max_length
        emb_dim = self.embedding.weight.data.shape[1]
        self.dnn = nn.Sequential(
            nn.Linear(emb_dim * self.max_length, 32),
            nn.ReLU(),
            nn.Linear(32, dataset.label_size))

    def forward(self, inputs):
        """Embed each sequence, right-pad to ``self.max_length``, classify.

        Args:
            inputs: (batch, seq_len) tensor of token ids, seq_len <= max_length.

        Returns:
            (batch, label_size) tensor of raw logits (no sigmoid applied).
        """
        inputs = inputs.long()
        pad = self.max_length - inputs.shape[1]
        # Batched right-pad with token id 0 (matches the original per-row
        # loop's zero padding), on the same device and dtype as the input.
        padding = torch.zeros(inputs.shape[0], pad,
                              dtype=torch.long, device=inputs.device)
        padded = torch.cat((inputs, padding), dim=1)
        sentence_emb = self.embedding(padded)
        # Flatten (batch, max_length, emb_dim) -> (batch, max_length * emb_dim).
        return self.dnn(sentence_emb.reshape(sentence_emb.shape[0], -1))


def train_one_epoch(net, train_iter, loss, updater, device):
    """Run exactly one pass over ``train_iter``, updating ``net`` in place.

    Bug fixes vs. the original: the function looped ``num_epochs`` times
    internally (its caller loops again, so training ran num_epochs² passes),
    and ``all_loss += l`` accumulated a graph-holding tensor containing only
    the last batch's loss. Now a single epoch is run and the scalar loss of
    every batch is averaged.

    Args:
        net: the model to train.
        train_iter: iterable of (X, Y) mini-batches.
        loss: loss function applied to (y_hat, y), both cast to float32.
        updater: a torch optimizer, or a callable taking ``batch_size``.
        device: device to move each batch to.
    """
    total_loss = 0.0
    num_batches = 0
    for X, Y in train_iter:
        X, y = X.to(device), Y.to(device)
        y_hat = net(X)
        l = loss(y_hat.to(torch.float32), y.to(torch.float32))
        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            updater.step()
        else:
            l.backward()
            # Custom updater (e.g. d2l.sgd); loss is already a mean.
            updater(batch_size=1)
        # .item() detaches the scalar so the autograd graph can be freed.
        total_loss += l.item()
        num_batches += 1
    print("完成一轮训练 loss ", total_loss / max(num_batches, 1))


def predict_classifier(prefix, net, vocab, device):  # @save
    """Predict affix position and attribute types for a token sequence.

    Output unit 0 encodes affix position (sigmoid > 0.5 → "後綴", else
    "前綴"); each remaining unit is an independent multi-label flag mapped
    through ``TypeTable``.

    Args:
        prefix: list of tokens to classify.
        net: classifier returning one logit row per input sequence.
        vocab: maps a token list to a list of token ids.
        device: unused here; kept for signature compatibility with callers.

    Returns:
        Tuple ``(prefix, prefix_attr, types)`` where ``prefix_attr`` is
        "後綴" or "前綴" and ``types`` lists the active ``TypeTable`` entries.
    """
    token_ids = torch.LongTensor([vocab[prefix]])
    # Single-sequence batch: take row 0 and turn logits into probabilities.
    probs = torch.sigmoid(net(token_ids)[0])
    prefix_attr = "後綴" if probs[0] > 0.5 else "前綴"
    types = [TypeTable[i] for i in range(1, len(probs)) if probs[i] > 0.5]
    return prefix, prefix_attr, types


# Top-level training/evaluation script (runs at import time); wrapped so any
# failure prints a traceback instead of killing the importing process.
try:
    num_epochs = 200
    lr = 0.01
    # loss = nn.CrossEntropyLoss()
    # Multi-label targets: each output unit gets an independent sigmoid + BCE.
    loss = nn.BCEWithLogitsLoss()
    vocab_size, num_hiddens, num_layers, input_size = len(vocab), 256, 2, 64
    num_inputs = input_size
    device = try_gpu()
    net = Classifier()
    net = net.to(device)
    # Initialize
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)

    # NOTE(review): predict_ch8 is not defined in this file — presumably it
    # comes from `from custom import *` or d2l; confirm, otherwise this lambda
    # raises NameError when called. It is never invoked below.
    predict = lambda prefix: predict_ch8(prefix, net, vocab, device)

    # Sanity prediction with the untrained classifier.
    print(predict_classifier(['當你', '暴擊時', '有', '20%', '機率', '補充', '20', '充能'], net, vocab, device))
    # NOTE(review): train_one_epoch as written also loops
    # `for epoch in range(num_epochs)` internally, so this outer loop runs
    # num_epochs * num_epochs training passes — confirm intent.
    for epoch in range(num_epochs):
        train_one_epoch(
            net, train_iter, loss, updater, device)
        # Every 2 epochs, print predictions on hand-labelled examples; the
        # plain print after each call shows the expected answer for eyeballing.
        if (epoch + 1) % 2 == 0:
            print(predict_classifier(['當你', '暴擊時', '有', '20%', '機率', '補充', '30', '充能'], net, vocab, device))
            print("前綴 暴擊")
            print(
                predict_classifier(
                    ['number%', '更少', '持續', '時間', '效果', '持續', '時間', '，', '免疫', '冰緩', '和', '冰凍'], net,
                    vocab, device))
            print("後綴 None")
            print(predict_classifier(['1', '個', '附加', '天賦', '為', '瘋', '狂面'], net, vocab, device))
            print("前綴 魔力 速度 光環")
            print(predict_classifier(['1', '個', '附加', '的', '天賦', '為', '火焰', '包覆'], net, vocab, device))
            print("前綴 傷害 元素 火焰")
    #         animator.add(epoch + 1, [ppl])
    # print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')

# NOTE(review): bare `except:` also catches SystemExit/KeyboardInterrupt;
# `except Exception:` would be safer.
except:
    import traceback

    traceback.print_exc()
