import sys, os, time
import random, numpy as np
import torch, torch.nn as nn
import shutil, logging, json
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertConfig
# from transformers import BertForSequenceClassification, AdamW
from collections import OrderedDict

from dataloader import QNLIDataset
from BaseModel.modeling_bert import BertForSequenceClassification
from BaseModel.modeling_bert_lowrank import BertForSequenceClassificationLowRank
from lowrank_method import svd_decompose, hessian_decompose
from train import train_model, evaluate_model
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

def obtain_model(bert_path, device, num_labels = 2, lr_model = 5e-5):
    """Load a dense BERT sequence classifier plus its tokenizer.

    Args:
        bert_path: HF-style directory/name to load config, weights, vocab from.
        device: torch device the model is moved to.
        num_labels: number of classification labels (default 2, e.g. QNLI).
        lr_model: learning rate stashed on the model for the trainer to read.

    Returns:
        (model, tokenizer) tuple.
    """
    config = BertConfig.from_pretrained(bert_path, num_labels=num_labels)
    config.num_labels = num_labels
    # NOTE(review): activation forced to ReLU instead of BERT's default GELU —
    # presumably required by the low-rank decomposition pipeline; confirm.
    config.hidden_act = "relu"

    tokenizer = BertTokenizer.from_pretrained(bert_path)
    tokenizer.model_max_length = 128

    # Instantiate the classifier and attach the learning rate for downstream use.
    model = BertForSequenceClassification.from_pretrained(bert_path, config=config).to(device)
    model.learning_rate = lr_model
    return model, tokenizer

def obtain_model_lowrank(bert_path_lowrank, device, config_path, num_labels = 2, lr_model = 5e-5):
    """Load the low-rank BERT sequence classifier plus its tokenizer.

    Args:
        bert_path_lowrank: HF-style directory holding the low-rank checkpoint.
        device: torch device the model is moved to.
        config_path: JSON file providing the per-layer rank map ('layer_rank').
        num_labels: number of classification labels (default 2).
        lr_model: learning rate stashed on the model for the trainer to read.

    Returns:
        (model, tokenizer) tuple.
    """
    with open(config_path, 'r') as f:
        custom_config = json.load(f)

    config = BertConfig.from_pretrained(bert_path_lowrank, num_labels=num_labels)
    config.num_labels = num_labels
    # NOTE(review): ReLU instead of default GELU — matches obtain_model; confirm intent.
    config.hidden_act = "relu"
    # Per-layer rank ratios drive how the low-rank layers are shaped.
    config.layer_rank = custom_config['layer_rank']

    tokenizer = BertTokenizer.from_pretrained(bert_path_lowrank)
    tokenizer.model_max_length = 128

    model = BertForSequenceClassificationLowRank.from_pretrained(
        bert_path_lowrank,
        config=config,
    ).to(device)
    model.learning_rate = lr_model
    return model, tokenizer



def calculate_model_size(model):
    """Return the on-disk size of *model*'s state dict in megabytes.

    Serializes the state dict to a private temporary file and measures it.

    Args:
        model: any torch.nn.Module.

    Returns:
        float: serialized size in MB (bytes / 1e6).
    """
    import tempfile

    # BUGFIX: the original wrote a fixed "model_size.p" in the CWD, which
    # collides under concurrent runs and leaks the file if torch.save or
    # getsize raises before os.remove.  A unique temp file + finally fixes both.
    fd, tmp_path = tempfile.mkstemp(suffix=".p")
    os.close(fd)
    try:
        torch.save(model.state_dict(), tmp_path)
        return os.path.getsize(tmp_path) / 1e6
    finally:
        if os.path.exists(tmp_path):
            os.remove(tmp_path)

# SVD-compress a copy of the model's parameters into a new OrderedDict
def svd_compress_model(model, hidden_size, layer_rank, data_name):
    """Build a state dict where selected 2-D weights are replaced by SVD factors.

    Every parameter whose name contains a key of `layer_rank` with a ratio in
    (0, 1) and whose tensor is 2-D is factored into A, B of rank
    r = int(ratio * hidden_size); the factors are stored under the original
    name with 'weight' replaced by 'weightA' / 'weightB'.  All other
    parameters are copied through unchanged.  The result is also saved to
    ./model/bert_lowrank/pytorch_model.bin.

    Args:
        model: trained nn.Module to compress.
        hidden_size: base dimension used to turn a ratio into an integer rank.
        layer_rank: mapping {substring-of-param-name: rank ratio in (0, 1)}.
        data_name: currently unused; kept for interface compatibility.

    Returns:
        OrderedDict mapping parameter names to tensors (factors or originals).
    """
    total_storage = 0            # bytes of the compressed parameter set
    total_storage_original = 0   # bytes of the original parameter set
    all_time = 0.0               # cumulative decomposition wall time (seconds)
    params_to_modify = OrderedDict()

    for name, param in model.named_parameters():
        # Decide whether this parameter is scheduled for decomposition.
        # (Replaces the original int `flag` juggling with a None sentinel.)
        rank = None
        for key, ratio in layer_rank.items():
            if key in name and 0 < ratio < 1:
                rank = int(ratio * hidden_size)
                break

        if rank is not None and len(param.shape) == 2:
            print("decompose ", name)
            start_time = time.time()
            A, B = svd_decompose(param.data.clone(), rank)
            A, B = torch.tensor(A), torch.tensor(B)
            all_time += time.time() - start_time
            params_to_modify[name.replace('weight', 'weightA')] = A
            params_to_modify[name.replace('weight', 'weightB')] = B
            total_storage += A.numel() * A.element_size()
            total_storage += B.numel() * B.element_size()
            total_storage_original += param.numel() * param.element_size()
        else:
            # Non-matching or non-matrix parameters are carried over as-is.
            params_to_modify[name] = param
            total_storage += param.numel() * param.element_size()
            total_storage_original += param.numel() * param.element_size()

    # BUGFIX: create the target directory first so torch.save does not raise
    # FileNotFoundError on a fresh checkout.
    out_path = "./model/bert_lowrank/pytorch_model.bin"
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    torch.save(params_to_modify, out_path)

    return params_to_modify

def lowrank_to_model(bert_path, bert_path_lowrank, data_name, config_path):
    """End-to-end compression pipeline for a fine-tuned QNLI model.

    Loads the dense checkpoint, measures its size and accuracy, SVD-compresses
    it according to the JSON config, reloads the compressed weights into the
    low-rank architecture, and prints size/accuracy for both variants.

    Args:
        bert_path: HF directory of the dense BERT model.
        bert_path_lowrank: HF directory of the low-rank BERT model.
        data_name: dataset tag forwarded to svd_compress_model.
        config_path: JSON file with 'hidden_size' and 'layer_rank'.

    Returns:
        0 on completion.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Evaluation data: small 200-sample subsets of QNLI.
    tokenizer = BertTokenizer.from_pretrained(bert_path)
    train_set = QNLIDataset("./glue_data/QNLI/train.tsv", tokenizer, num_samples=200)
    dev_set = QNLIDataset("./glue_data/QNLI/dev.tsv", tokenizer, num_samples=200)
    train_dataloader = DataLoader(train_set, batch_size=20, shuffle=True)
    test_dataloader = DataLoader(dev_set, batch_size=20, shuffle=False)

    # Baseline: load the fine-tuned dense model, measure size and accuracy.
    model, _ = obtain_model(bert_path, device, num_labels=2)
    model.load_state_dict(torch.load("./model/model_trained/qnli_model.pth"))
    model_size = calculate_model_size(model)
    acc = evaluate_model(model, test_dataloader, device)

    # Low-rank decompose the model as prescribed by the config file.
    with open(config_path, 'r') as f:
        config = json.load(f)
    model_low = svd_compress_model(
        model,
        config['hidden_size'],
        config['layer_rank'],
        data_name,
    )

    # Persist the compressed state dict.
    torch.save(model_low, "./model/model_lowrank/qnli_model_lowrank.pth")

    # Rebuild the low-rank architecture and load the compressed weights.
    # strict=False: factored names (weightA/weightB) replace the dense ones.
    model_lowrank, _ = obtain_model_lowrank(bert_path_lowrank, device, config_path, num_labels=2)
    model_lowrank.load_state_dict(
        torch.load("./model/model_lowrank/qnli_model_lowrank.pth"),
        strict=False,
    )
    model_lowrank_size = calculate_model_size(model_lowrank)
    acc_lowrank = evaluate_model(model_lowrank, test_dataloader, device)

    print("原模型大小: ", model_size)
    print("原模型准确率: ", acc)
    print("低秩模型大小: ", model_lowrank_size)
    print("低秩模型准确率: ", acc_lowrank)

    return 0

