import torch.optim
from torch import nn
from torch.utils.data import random_split, DataLoader

from data import myData, collect_fn
from softmaskedBert import biGruDetector, softMaskedBert
from softmaskedBert import bert


class Coach(bert):
    """Training harness for the Soft-Masked BERT spelling-correction model.

    Wires together the detection network (bi-GRU) and the correction network
    (soft-masked BERT), builds the train/test data loaders, their optimizers
    and losses, and runs the joint training loop.
    """

    def __init__(self):
        super(Coach, self).__init__()
        dataset = myData()
        self.config = None
        # Derive the test split as the remainder so the two sizes always sum
        # to len(dataset): int(len*0.8) + int(len*0.2) can fall one short for
        # lengths not divisible by 5, which makes random_split raise.
        train_size = int(len(dataset) * 0.8)
        test_size = len(dataset) - train_size
        train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
        self.train_data_loader = DataLoader(dataset=train_dataset, batch_size=16, collate_fn=collect_fn, shuffle=True)
        self.test_data_loader = DataLoader(dataset=test_dataset, batch_size=16, collate_fn=collect_fn, shuffle=True)
        # Resolve the device first so the model setup below can use it instead
        # of a hard-coded "cuda:0" (which crashed on CPU-only machines).
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Detection model: bi-GRU over token embeddings, predicts per-token
        # error probability.
        # NOTE(review): input_size=786 looks like a typo for 768 (the usual
        # BERT hidden size) — confirm against the embedding dim before changing.
        self.detector_model = biGruDetector(input_size=786, hidden_size=50)
        self.D_op = torch.optim.Adam(self.detector_model.parameters(), lr=1e-3)
        self.D_loss = nn.CrossEntropyLoss()
        # Correction model: soft-masked BERT over the same embeddings.
        self.c_model = softMaskedBert(
            vocab_size=self.vocab_size,
            mask_e=self.mask_e.to(self.device),
            bert_encoder=self.bert_encoder,
        )
        self.C_op = torch.optim.Adam(self.c_model.parameters(), lr=1e-3)
        self.C_loss = nn.NLLLoss()
        self.epoch = 20
        # Weight of the correction loss in the joint objective
        # (detector loss gets 1 - gama).
        self.gama = 0.6

    def train_model(self, keep):
        """Run the joint detector/corrector training loop.

        Args:
            keep: if truthy, resume from the saved checkpoints instead of
                training the freshly initialised models.
        """
        if keep:
            # NOTE(review): the optimizers were built on self.detector_model /
            # self.c_model parameters; after loading different objects here the
            # optimizers still step the ORIGINAL models. Rebuild the optimizers
            # on the loaded models' parameters to truly resume — confirm intent.
            detector_model = torch.load('./checkponts/detector_model').to(self.device)
            model = torch.load('./checkponts/model').to(self.device)
        else:
            detector_model = self.detector_model.to(self.device)
            model = self.c_model.to(self.device)
        detector_model.train()
        model.train()
        for epoch in range(self.epoch):
            total_loss = 0.0
            for i, batch_data in enumerate(self.train_data_loader):
                batch_inp_ids, batch_oup_ids, batch_labels, batch_mask = batch_data
                # Tensor.to() is NOT in-place: the moved tensors must be
                # re-bound, otherwise the originals silently stay on the CPU.
                batch_oup_ids = batch_oup_ids.to(self.device)
                batch_labels = batch_labels.to(self.device)
                batch_mask = batch_mask.to(self.device)
                batch_inp_embedding = self.embedding(batch_inp_ids).to(self.device)
                # Per-token error probabilities from the detector.
                prob = detector_model(batch_inp_embedding)
                detector_loss = self.D_loss(prob.squeeze(), batch_labels.float())
                # NOTE(review): Coach inherits from bert; check whether this
                # should be self.get_extend_attention_mask(...) rather than
                # going through a self.bert attribute.
                out = model(
                    batch_inp_embedding,
                    prob,
                    self.bert.get_extend_attention_mask(
                        batch_mask,
                        batch_oup_ids.shape,
                        self.device
                    )
                )
                # Mask padded positions, then flatten to [batch*max_len, vocab]
                # vs [batch*max_len] targets for NLLLoss.
                # Fixed: `out.shape(-1)` was a TypeError (Size is not callable)
                # and the parentheses reshaped the mask instead of the masked
                # output.
                model_loss = self.C_loss(
                    (out * batch_mask.unsqueeze(-1)).reshape(-1, out.shape[-1]),
                    batch_oup_ids.reshape(-1),
                )
                loss = self.gama * model_loss + (1 - self.gama) * detector_loss
                # Step BOTH optimizers: the original stepped only C_op, so the
                # detector's parameters were never actually updated.
                self.D_op.zero_grad()
                self.C_op.zero_grad()
                # retain_graph=True removed: one backward per batch needs no
                # retained graph, and keeping it grows memory across steps.
                loss.backward()
                self.D_op.step()
                self.C_op.step()
                total_loss += loss.item()
            # Periodic checkpointing every 5 epochs.
            if (epoch + 1) % 5 == 0:
                torch.save(detector_model, f'./checkponts/detector_model{epoch}')
                torch.save(model, f'./checkponts/model{epoch}')






