import random, numpy as np
import torch, torch.nn as nn
import shutil, logging, json
from transformers import BertTokenizer, BertConfig
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from dataloader import QNLIDataset
from lowrank_method import svd_decompose, hessian_decompose
from train import train_model, evaluate_model
from model_lowrank import obtain_model, obtain_model_lowrank, calculate_model_size, svd_compress_model
from model_distillation import knowledge_distillation, output_distribution
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)


def main(bert_path, bert_path_lowrank, data_name, config_path):
    """Distill a fine-tuned BERT QNLI classifier into a low-rank student model.

    Loads the fine-tuned full-rank (teacher) model and a previously saved
    low-rank (student) model, runs knowledge distillation from teacher to
    student on a small QNLI subset, then compares their output distributions
    on the dev set.

    Args:
        bert_path: directory containing the original pretrained BERT
            model/tokenizer files.
        bert_path_lowrank: directory containing the low-rank BERT variant.
        data_name: dataset identifier (e.g. "QNLI"); not used in this
            function body — kept for interface compatibility with the
            (currently disabled) compression step.
        config_path: path to the low-rank configuration JSON; not used in
            the active code path here, only by `obtain_model_lowrank`.

    Returns:
        0 on completion.
    """
    # Build tokenizer and data loaders over small QNLI subsets.
    # NOTE(review): shuffle=False on the *training* loader — confirm this is
    # intentional (training usually shuffles).
    tokenizer = BertTokenizer.from_pretrained(bert_path)
    train_dataset = QNLIDataset("./glue_data/QNLI/train.tsv", tokenizer, num_samples=100)
    test_dataset = QNLIDataset("./glue_data/QNLI/dev.tsv", tokenizer, num_samples=200)
    train_dataloader = DataLoader(train_dataset, batch_size=10, shuffle=False)
    test_dataloader = DataLoader(test_dataset, batch_size=20, shuffle=False)

    # Select compute device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the original (teacher) model and its fine-tuned weights.
    # map_location=device keeps the checkpoint loadable on CPU-only hosts
    # even if it was saved from a GPU run.
    model, tokenizer_M = obtain_model(bert_path, device, num_labels=2)
    model.load_state_dict(
        torch.load("./model/model_trained/qnli_model.pth", map_location=device)
    )

    # Load the low-rank (student) model and its previously saved parameters.
    # strict=False because the factorized layers mean the saved state dict
    # does not match the rebuilt model's parameter set exactly.
    model_lowrank, tokenizer_M = obtain_model_lowrank(bert_path_lowrank, device, config_path, num_labels=2)
    model_lowrank.load_state_dict(
        torch.load("./model/model_lowrank/qnli_model_lowrank.pth", map_location=device),
        strict=False
    )

    # Distill knowledge from the full-rank teacher into the low-rank student,
    # then compare their output distributions on the test set.
    student_model = model_lowrank
    teacher_model = model
    knowledge_distillation(teacher_model, student_model, train_dataloader, test_dataloader)
    output_distribution(teacher_model, student_model, test_dataloader)

    return 0

if __name__ == "__main__":
    # Default artifact locations and dataset name for the QNLI pipeline.
    bert_path = "./model/bert_en"
    bert_path_lowrank = "./model/bert_lowrank"
    data_name = "QNLI"
    config_path = "./model/bert_lowrank/config_lowrank.json"
    main(
        bert_path,
        bert_path_lowrank,
        data_name,
        config_path,
    )