import os
import sys

root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root_path)
import time
import torch
from sklearn.metrics import f1_score
from tqdm import tqdm
from bert_yt.b_config import b_conf
from config import conf
from bert_yt.my_bert import BertModel, MyDataset, get_dataloader


def load_model():
    """Load the fine-tuned full-precision BERT classifier onto CPU.

    Returns:
        BertModel: model with its state dict restored from ``b_conf.model_path``.
    """
    model = BertModel()
    # weights_only=True: a state dict is plain tensors, so restrict the
    # unpickler and avoid the arbitrary-code pickle path. The kwarg is
    # supported by the torch version in use (it is already passed elsewhere
    # in this file when loading the quantized model).
    state_dict = torch.load(b_conf.model_path, map_location='cpu', weights_only=True)
    model.load_state_dict(state_dict)
    return model


def dq():
    """Dynamically quantize the trained model's Linear layers to int8.

    Loads the full-precision model, applies dynamic quantization, and
    pickles the whole quantized module to ``b_conf.q_model_path``.
    """
    base = load_model()
    base.eval()
    q_model = torch.quantization.quantize_dynamic(
        base,
        {torch.nn.Linear},
        dtype=torch.qint8,
    )
    torch.save(q_model, b_conf.q_model_path)


def load_q_model():
    """Load the saved dynamically-quantized model onto CPU.

    The file holds a full pickled module (not a state dict), hence
    ``weights_only=False`` is required here.
    """
    return torch.load(b_conf.q_model_path, weights_only=False, map_location='cpu')


def compare():
    model = load_model()
    model.to(conf.device)
    q_model = load_q_model()
    model.eval()
    q_model.eval()

    dataset = MyDataset(conf.test_path)
    dataloader = get_dataloader(dataset)

    pred_list = []
    q_pred_list = []
    label_list = []
    q_label_list = []

    cost_list1 = []
    cost_list2 = []
    batch_size_list = []
    with torch.no_grad():
        for i, (input_ids, attention_mask, labels) in enumerate(tqdm(dataloader)):
            print(f'q_model {i}')
            start2 = time.time()
            q_pred = q_model(input_ids, attention_mask)
            # Segmentation fault (core dumped)可能是内存不足
            cost_list2.append(time.time() - start2)

            input_ids, attention_mask = input_ids.to(conf.device), attention_mask.to(conf.device)
            print(f'model {i}')
            start1 = time.time()
            pred = model(input_ids, attention_mask)
            cost_list1.append(time.time() - start1)

            pred_list += pred.argmax(dim=1).tolist()
            label_list += labels.tolist()
            q_pred_list += q_pred.argmax(dim=1).tolist()
            q_label_list += labels.tolist()

            batch_size_list.append(len(labels))

    f1 = f1_score(label_list, pred_list, average='micro')
    q_f1 = f1_score(q_label_list, q_pred_list, average='micro')
    print(f'F1-f1：{f1} q_f1:{q_f1}')
    print(f'batch_size_list:{batch_size_list}')
    print(f'cost_list1:{cost_list1} avg={sum(cost_list1) / len(cost_list1)}')
    print(f'cost_list2:{cost_list2} avg={sum(cost_list2) / len(cost_list2)}')
    """
    F1-f1：0.9543 q_f1:0.8773
batch_size_list:[1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 784]
cost_list1:[3.129713773727417, 0.022249698638916016, 0.0221405029296875, 0.022276639938354492, 0.02393054962158203, 0.02202749252319336, 0.022244930267333984, 0.022008657455444336, 0.021845102310180664, 0.018077850341796875] 
avg=0.33265151977539065
cost_list2:[59.91883659362793, 59.77771234512329, 61.34675312042236, 61.33645677566528, 63.41091537475586, 61.269145488739014, 60.704221963882446, 61.47505044937134, 65.38875603675842, 50.96879196166992] 
avg=60.55966401100159
就nm离谱。后续动态量化会在GPU上跑
    """


def _test_load_model():
    """Smoke test: load both the full-precision and quantized models and print them."""
    model = load_model()
    print(f'model加载成功={model}')
    q_model = load_q_model()
    print(f'q_model加载成功={q_model}')


if __name__ == '__main__':
    # Quantize first so compare() can load the freshly saved int8 model.
    dq()
    # _test_load_model()
    compare()
