import os
import sys
import time
import numpy as np

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision
from torchvision import datasets
import torchvision.transforms as transforms
from model.only_pnp_qat import SegUNet
from data_set.fpv_voc import Fpv

from tqdm import tqdm
from torch import nn
import time
from utils.metrics import MetricsCPU
def eval_model(model):
    """Run *model* over the FPV validation split on CPU and print metrics.

    Doubles as the calibration pass for post-training quantization: every
    forward call feeds the observers inserted by ``prepare``/``prepare_fx``.

    Args:
        model: a segmentation network mapping (N, C, H, W) images to
            per-class logits (N, n_classes, H, W) — assumed from usage;
            confirm against SegUNet.
    """
    test_data = Fpv('F:/fpv1024_3pixel_v2345', is_train=False)
    testloader = DataLoader(test_data, batch_size=4, shuffle=False, num_workers=4)
    me = MetricsCPU(n_classes=10)
    # Inference only: no_grad skips autograd bookkeeping. The deprecated
    # torch.autograd.Variable wrappers were removed — tensors are fine as-is.
    with torch.no_grad():
        for img, target in tqdm(testloader):
            img = img.cpu()
            target = target.cpu()
            pred = model(img)
            # softmax is monotonic per-pixel, so argmax over raw logits
            # yields the same class indices — the softmax call was dropped.
            pred = torch.argmax(pred, dim=1)
            me.update(pred, target)
    metr = me.compute_metrics()
    print(metr)





def ptq():
    """Eager-mode post-training static quantization of SegUNet.

    Loads FP32 weights, inserts min/max observers, calibrates on the
    validation split via :func:`eval_model`, converts to int8 in place,
    then evaluates the quantized model.
    """
    myModel = SegUNet(10)
    myModel.load_state_dict(
        torch.load('F:/xag_fpv_sig_wmsgd88.pth', map_location='cpu'),
        strict=True,
    )
    myModel.eval()  # observers/convert require eval mode

    # Fuse Conv, bn and relu
    # myModel.fuse_model()
    # NOTE(review): fusing conv+bn+relu before prepare usually improves
    # quantized accuracy — re-enable once SegUNet implements fuse_model().

    # Simple min/max range estimation and per-tensor weight quantization.
    myModel.qconfig = torch.ao.quantization.default_qconfig
    print(myModel.qconfig)

    # Announce *before* prepare — the original printed this after the
    # observers had already been inserted.
    print('Post Training Quantization Prepare: Inserting Observers')
    torch.ao.quantization.prepare(myModel, inplace=True)

    # Calibrate: forward passes let the observers record activation ranges.
    eval_model(myModel)
    print('Post Training Quantization: Calibration done')

    # Swap float modules for quantized ones using the observed ranges.
    torch.ao.quantization.convert(myModel, inplace=True)
    print('Post Training Quantization: Convert done')

    eval_model(myModel)
def tptq():
    """FX-graph-mode PTQ demo: quantize torchvision ResNet-50 and compare latency."""
    import copy
    from torch.ao.quantization import get_default_qconfig
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx
    from torchvision.models import resnet50

    fp32_model = resnet50().eval()
    model = copy.deepcopy(fp32_model)

    # `qconfig` specifies how to observe an operator's activations/weights;
    # the "" (empty-string) key in `qconfig_dict` applies it model-wide.
    # Per-module-type, per-submodule, or None (skip) entries are also possible.
    example_inputs = (torch.randn(1, 3, 224, 224),)
    qconfig = get_default_qconfig("fbgemm")
    qconfig_dict = {"": qconfig}

    # prepare_fx traces the model and inserts observers per qconfig_dict.
    model_prepared = prepare_fx(model, qconfig_dict, example_inputs)

    # Calibration: forward passes let observers record activation/weight stats.
    calibration_data = [torch.randn(1, 3, 224, 224) for _ in range(100)]
    for sample in calibration_data:
        model_prepared(sample)

    # convert_fx inserts quantize/dequantize nodes and swaps float ops
    # for their quantized counterparts.
    model_quantized = convert_fx(copy.deepcopy(model_prepared))

    # Benchmark each model with its own timer.
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        t = time.time()
        fp32_model(x)
        print(time.time() - t)

        # BUG FIX: reset the clock here — the original reused `t`, so the
        # second print reported fp32 time + quantized time combined.
        t = time.time()
        model_quantized(x)
        print(time.time() - t)

if __name__ == '__main__':
    # Runs the FX-graph-mode demo; the eager-mode ptq() path is defined
    # above but not invoked here.
    tptq()