import os
import sys
from torchinfo import summary
import my_bert
import torch
from sklearn.metrics import f1_score
from torch.nn import CrossEntropyLoss
from torch.optim import AdamW
from tqdm import tqdm
from bert_yt.b_config import b_conf
from bert_yt.my_bert import get_dataloader, MyDataset
from config import conf
from my_bert import bert_tokenizer, bert_config
from torch import nn

from utils import id2label, ensure_pdir_exist

# Make the project root importable.
# NOTE(review): this append runs AFTER the project imports above, so it cannot
# have helped them resolve — presumably they work via the caller's sys.path;
# consider moving this above the `bert_yt`/`config` imports. Confirm before
# relying on it.
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_path)


class StudentModel(nn.Module):
    """BiLSTM student network distilled from a BERT teacher.

    Embeds token ids, zeroes out padded positions using the attention mask,
    runs a bidirectional LSTM, and classifies from the last timestep.
    ``distill_hidden`` projects the student's hidden state to the teacher's
    hidden size so intermediate representations can be matched (MSE) during
    middle-layer distillation.
    """

    def __init__(self, input_size=128, hidden_size=512, num_layers=2, dropout=0.3):
        super().__init__()
        self.embedding = nn.Embedding(bert_tokenizer.vocab_size, input_size)
        # NOTE: attribute name 'bilistm' (sic) is kept intentionally so that
        # previously saved state_dict checkpoints keep loading.
        self.bilistm = nn.LSTM(input_size, hidden_size, num_layers=num_layers,
                               bidirectional=True, batch_first=True)
        # Maps the 2*hidden_size BiLSTM state to the teacher's hidden size.
        self.distill_hidden = nn.Linear(hidden_size * 2, bert_config.hidden_size)
        self.fc = nn.Linear(hidden_size * 2, len(id2label))
        self.dropout = nn.Dropout(dropout)

    def forward(self, input_ids, attention_mask, return_hidden=False):
        """Return classification logits; with return_hidden=True also return
        the hidden state projected to the teacher's hidden size.

        input_ids: (batch, seq) token ids.
        attention_mask: (batch, seq) padding mask — assumed 1/0 per token
            (TODO confirm against the dataloader).
        """
        embedded = self.embedding(input_ids)
        # Bugfix: use the out-of-place unsqueeze. The original called
        # attention_mask.unsqueeze_(-1), mutating the CALLER's tensor in
        # place — any later use of the same mask saw an extra dimension.
        mask = attention_mask.unsqueeze(-1)
        embedded = embedded * mask
        lstm_out, _ = self.bilistm(embedded)
        # Last timestep of the BiLSTM output (both directions concatenated).
        # NOTE(review): for right-padded batches this reads a padded position;
        # masked pooling might be preferable — confirm intent.
        hidden = lstm_out[:, -1]
        hidden = self.dropout(hidden)
        logits = self.fc(hidden)
        if return_hidden:
            return logits, self.distill_hidden(hidden)
        return logits


def distill(distill_loss, model_path, img_path, is_middle_layer_distill=False):
    """Distill the frozen BERT teacher into a BiLSTM student.

    distill_loss: criterion returning (loss, teacher_labels); when
        is_middle_layer_distill is True it additionally receives the student's
        and teacher's hidden states.
    model_path: where the best (by validation micro-F1) state_dict is saved.
    img_path: where the train/val F1 curve is redrawn each epoch.
    is_middle_layer_distill: also match intermediate hidden states.

    Trains for up to 15 epochs, early-stopping after 5 epochs without a
    validation-F1 improvement.
    """
    teacher = my_bert.get_model()
    student = StudentModel()
    teacher.to(conf.device)
    student.to(conf.device)
    teacher.eval()  # teacher is frozen; only queried under no_grad below
    student.train()

    train_dataloader = get_dataloader(MyDataset(conf.train_path), batch_size=128)
    val_dataloader = get_dataloader(MyDataset(conf.val_path), batch_size=256)

    optimizer = AdamW(student.parameters(), lr=1e-3)

    max_f1 = 0.0
    train_f1_list = []
    val_f1_list = []
    epoches = 15
    x_label = []
    early_stopping = 5
    no_improvement_count = 0

    for epoch in range(epoches):
        total_loss = 0.0
        total_item = 0
        pred_list = []
        label_list = []

        # Ground-truth `labels` are deliberately unused here: the training
        # targets come from the teacher's predictions.
        for input_ids, attention_mask, labels in tqdm(train_dataloader):
            input_ids, attention_mask = (input_ids.to(conf.device, non_blocking=True),
                                         attention_mask.to(conf.device, non_blocking=True))
            if is_middle_layer_distill:
                with torch.no_grad():
                    teacher_logits, teacher_hidden = teacher(input_ids, attention_mask, return_hidden=True)
                student_logits, student_hidden = student(input_ids, attention_mask, return_hidden=True)
                loss, teacher_labels = distill_loss(student_logits, teacher_logits, student_hidden, teacher_hidden)
            else:
                with torch.no_grad():
                    teacher_logits = teacher(input_ids, attention_mask)
                student_logits = student(input_ids, attention_mask)
                loss, teacher_labels = distill_loss(student_logits, teacher_logits)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Bugfix: accumulate a Python float via .item(). The original
            # summed the loss *tensor*, keeping device tensors alive across
            # the epoch and turning total_loss into a tensor.
            total_loss += loss.item() * len(input_ids)
            total_item += len(input_ids)
            pred_list += student_logits.argmax(dim=1).tolist()
            label_list += teacher_labels.tolist()

        # Training F1 is measured against the teacher's labels (the actual
        # distillation targets), not the ground truth.
        batch_final_f1 = f1_score(label_list, pred_list, average='micro')
        eval_f1 = validata_on_val(student, val_dataloader)
        train_f1_list.append(batch_final_f1)
        val_f1_list.append(eval_f1)
        x_label.append(epoch + 1)
        my_bert.pretty_draw(train_f1_list, val_f1_list, x_label, path=img_path, x_label_desc='epoch')
        print(f'epoch={epoch + 1}, loss={total_loss / total_item:.4f}, '
              f'f1={batch_final_f1:.4f}, eval_f1={eval_f1:.4f}')
        if eval_f1 > max_f1:
            max_f1 = eval_f1
            ensure_pdir_exist(model_path)
            print(f'更新验证集最大f1={max_f1}，保存模型')
            torch.save(student.state_dict(), model_path)
            no_improvement_count = 0
        else:
            no_improvement_count += 1
            if no_improvement_count >= early_stopping:
                print(f'验证集f1没有提升{early_stopping}次，提前结束训练')
                break
        print(f'max_f1={max_f1}')


class HardLabelLoss(nn.Module):
    """Hard-label distillation: cross-entropy between the student's logits
    and the teacher's predicted (argmax) classes."""

    def __init__(self):
        super().__init__()
        self.loss = CrossEntropyLoss()

    def forward(self, student_logits, teacher_logits):
        """Return (loss, teacher_labels), where teacher_labels are the
        teacher's argmax predictions used as classification targets."""
        pseudo_targets = teacher_logits.argmax(dim=1)
        return self.loss(student_logits, pseudo_targets), pseudo_targets


class SoftLabelLoss(nn.Module):
    """Soft-label distillation loss (Hinton-style knowledge distillation).

    Mixes a temperature-scaled KL divergence against the teacher's soft
    targets with a cross-entropy loss against the teacher's hard (argmax)
    labels:  loss = alpha * soft + (1 - alpha) * hard.
    """

    def __init__(self, temperature=4.0, alpha=0.5):
        """
        temperature: softens both distributions before the KL term; the KL
            term is rescaled by temperature**2 to keep gradient magnitudes
            comparable.
        alpha: weight of the soft (KL) term.
        """
        super().__init__()
        self.temperature = temperature
        self.alpha = alpha
        # Inlined hard-label criterion; behaves identically to HardLabelLoss.
        self.hard_criterion = CrossEntropyLoss()

    def forward(self, student_logits, teacher_logits):
        """Return (loss, teacher_labels)."""
        teacher_labels = teacher_logits.argmax(dim=1)
        hard_loss = self.hard_criterion(student_logits, teacher_labels)
        # Bugfix: the original called the raw ATen op torch.kl_div with the
        # magic int reduction=1 ('mean'), which averages over ALL elements
        # (batch * classes) — the PyTorch docs warn this does not compute the
        # true KL. 'batchmean' divides by the batch size only, matching the
        # standard distillation formulation.
        soft_loss = torch.nn.functional.kl_div(
            torch.log_softmax(student_logits / self.temperature, dim=1),
            torch.softmax(teacher_logits / self.temperature, dim=1),
            reduction='batchmean',
        ) * self.temperature ** 2
        loss = self.alpha * soft_loss + (1 - self.alpha) * hard_loss
        return loss, teacher_labels


class MiddleLayerLoss(nn.Module):
    """Soft-label distillation augmented with an MSE term pulling the
    student's projected hidden state toward the teacher's hidden state."""

    def __init__(self, temperature=4.0, alpha=0.5, middle_weight=0.5):
        super().__init__()
        self.soft_loss = SoftLabelLoss(temperature, alpha)
        self.middle_weight = middle_weight
        self.middle_layer_criterion = nn.MSELoss()

    def forward(self, student_logits, teacher_logits, student_hidden, teacher_hidden):
        """Return (loss, teacher_labels): a middle_weight-weighted mix of the
        hidden-state MSE and the soft-label loss."""
        logits_loss, teacher_labels = self.soft_loss(student_logits, teacher_logits)
        hidden_loss = self.middle_layer_criterion(student_hidden, teacher_hidden)
        w = self.middle_weight
        return w * hidden_loss + (1 - w) * logits_loss, teacher_labels


def validata_on_val(student: nn.Module, val_dataloader):
    """Compute the student's micro-F1 on the validation loader.

    Switches the model to eval mode for the pass and restores whatever
    train/eval mode it was in before returning. (The misspelled function
    name is kept because distill() calls it.)
    """
    was_training = student.training
    student.eval()
    preds = []
    golds = []
    with torch.no_grad():
        for input_ids, attention_mask, labels in val_dataloader:
            input_ids = input_ids.to(conf.device, non_blocking=True)
            attention_mask = attention_mask.to(conf.device, non_blocking=True)
            logits = student(input_ids, attention_mask)
            preds.extend(logits.argmax(dim=1).tolist())
            golds.extend(labels.tolist())
    student.train(was_training)
    return f1_score(golds, preds, average='micro')


def evaluate(model_path):
    """Load a trained student checkpoint and run the shared BERT evaluation.

    model_path: path to a state_dict saved by distill().
    """
    model = StudentModel()
    # NOTE(review): torch.load unpickles arbitrary objects from the file —
    # pass weights_only=True once the project's minimum torch version has it.
    model.load_state_dict(torch.load(model_path, map_location=conf.device_str))
    model.to(conf.device)
    my_bert.evaluate(model)


def ana_param_quantity():
    """Print parameter-count summaries for the teacher and the student.

    Results from a previous run are recorded inline for reference:
    teacher ~102.3M trainable params, student ~12.4M (≈8x smaller).
    """
    teacher = my_bert.get_model()
    student = StudentModel()
    summary(teacher, input_size=[(1, 512), (1, 512)], dtypes=[torch.long, torch.long])
    # Trainable params: 102,275,338
    summary(student, input_size=[(1, 512), (1, 512)], dtypes=[torch.long, torch.long])
    # Trainable params: 12,431,114


def do():
    """Experiment driver: each commented call below is one distillation run;
    the comments after it record the results observed on that run."""
    # ana_param_quantity()
    my_bert.evaluate()
    # teacher: batch_size=2048, throughput=230 samples/s
    # F1-score: 0.9543
    # distill(HardLabelLoss(), b_conf.distill_hard_model_path, b_conf.distill_hard_f1_img_path)
    # epoch=7, loss=0.1087, f1=0.9681, eval_f1=0.9368
    evaluate(b_conf.distill_hard_model_path)
    # hard-label student: batch_size=2048, throughput=540 samples/s
    # F1-score: 0.9407
    # distill(SoftLabelLoss(), b_conf.distill_soft_model_path, b_conf.distill_soft_f1_img_path)
    # epoch=7, loss=0.1687, f1=0.9640, eval_f1=0.9420
    # evaluate(b_conf.distill_soft_model_path)
    # F1-score: 0.9339
    # distill(MiddleLayerLoss(), b_conf.distill_middle_model_path, b_conf.distill_middle_f1_img_path,
    #         is_middle_layer_distill=True)
    # epoch=8, loss=0.1477, f1=0.9654, eval_f1=0.9256
    # evaluate(b_conf.distill_middle_model_path)
    # F1-score: 0.9337
    pass


# Script entry point.
if __name__ == '__main__':
    do()
