import torch
from resnet import ResNet34
import torch

# Synthetic calibration/eval batch: (10, 200, 80).  The last dim (80) matches
# the ResNet34 feat_dim below — presumably (batch, frames, mel-bins); TODO confirm.
input_batch = torch.randn(10, 200, 80)
def evaluate(model, input_batch, device_str='cuda'):
    """Run `model` on `input_batch` under `torch.no_grad()` and print the output.

    Args:
        model: a ``torch.nn.Module``; moved in place to `device_str`.
        input_batch: input tensor fed directly to the model.
        device_str: target device, either ``'cpu'`` or ``'cuda'``.

    Returns:
        The model's output tensor (also printed), so callers may inspect it.
        Previously the function returned ``None``; returning the output is
        backward-compatible.

    Raises:
        NotImplementedError: if `device_str` is not ``'cpu'`` or ``'cuda'``.
        RuntimeError: if ``'cuda'`` is requested but CUDA is unavailable.
    """
    if device_str not in ('cpu', 'cuda'):
        raise NotImplementedError("`device_str` should be 'cpu' or 'cuda' ")
    if device_str == 'cuda' and not torch.cuda.is_available():
        # `assert` is stripped under `python -O`; raise explicitly instead.
        raise RuntimeError('Check CUDA is available')
    input_batch = input_batch.to(device_str)
    model.to(device_str)
    with torch.no_grad():
        output = model(input_batch)
    print(output)
    return output


# Project-local ResNet34 speaker-embedding-style model; 80-dim input features,
# 256-dim output embedding.  No pretrained weights are loaded here.
model = ResNet34(feat_dim=80, embed_dim=256) # pretrained=True
#print(model)

# Step 1: architecture changes
# QuantStubs (we will do FloatFunctionals later)
# Done

# Step 2: fuse modules (recommended but not necessary)
# TODO
# modules_to_list = model.modules_to_fuse()

# It will keep Batchnorm
# eval() is required before PTQ prepare/fuse: observers must see inference-mode
# statistics (BatchNorm running stats, no dropout).
model.eval()
# fused_model = torch.ao.quantization.fuse_modules_qat(model, modules_to_list)

# This will fuse BatchNorm weights into the preceding Conv
# TODO
# fused_model = torch.ao.quantization.fuse_modules(model, modules_to_list)
# Fusion is skipped for now: `fused_model` is the SAME object as `model`
# (an alias, not a copy) — later qconfig assignment mutates `model` too.
fused_model = model

# Step 3: Assign qconfigs
# Step 3: Assign qconfigs
from torch.ao.quantization.fake_quantize import FakeQuantize

# NOTE(review): `activation_qconfig` / `weight_qconfig` below are currently
# UNUSED — the default 'x86' qconfig assigned at the end of this step takes
# effect instead.  They are kept as scaffolding for a custom QConfig experiment.

# Activations: unsigned 8-bit, per-tensor affine, ranges calibrated with a
# histogram observer.
activation_qconfig = FakeQuantize.with_args(
    observer=torch.ao.quantization.observer.HistogramObserver.with_args(
        quant_min=0,
        quant_max=255,
        dtype=torch.quint8,
        qscheme=torch.per_tensor_affine,
    )
)

# Weights: signed 8-bit, symmetric, quantized per output channel.
weight_qconfig = FakeQuantize.with_args(
    observer=torch.ao.quantization.observer.PerChannelMinMaxObserver.with_args(
        quant_min=-128,
        quant_max=127,
        dtype=torch.qint8,
        qscheme=torch.per_channel_symmetric,
    )
    # Per-tensor alternative:
    # observer=torch.ao.quantization.observer.MinMaxObserver.with_args(
    #     quant_min=-128,
    #     quant_max=127,
    #     dtype=torch.qint8,
    #     qscheme=torch.per_tensor_symmetric,
    # )
)

# qconfig = torch.ao.quantization.QConfig(activation=activation_qconfig,
#                                         weight=weight_qconfig)
# fused_model.qconfig = qconfig
# Use the modern `torch.ao.quantization` namespace consistently with the rest
# of the file (`torch.quantization` is a deprecated alias of it).
fused_model.qconfig = torch.ao.quantization.get_default_qconfig('x86')


# Step 4: Prepare for fake-quant
fused_model.eval()
# prepare() inserts observer modules for eager-mode PTQ and (by default,
# inplace=False) returns a new observed model.  For QAT one would call
# prepare_qat instead — hence the "_qat" note.
fake_quant_model = torch.ao.quantization.prepare(fused_model) # _qat
# prepare
# Calibration pass: a single forward so the observers record activation
# ranges.  This MUST happen before convert() below.
fake_quant_model(input_batch)

print("\nFloat")
evaluate(model, input_batch, 'cpu')


# NOTE(review): `fused_model` is the same object as `model` (no fusion was
# performed), so this prints the same result as the "Float" run above.
print("\nFused Model")
evaluate(fused_model, input_batch, 'cpu')


print("\nFake quant - PTQ")
evaluate(fake_quant_model, input_batch, 'cpu')

# fake_quant_model.apply(torch.ao.quantization.fake_quantize.disable_observer)

# print("\nFake quant - post-PTQ")
# evaluate(fake_quant_model, input_batch, 'cpu')


# Step 5: convert (true int8 model)
# Swaps observed float modules for quantized int8 ones using the calibrated
# ranges; the converted model runs on CPU.
converted_model = torch.ao.quantization.convert(fake_quant_model)

print("\nConverted model")
evaluate(converted_model, input_batch, 'cpu')

# Drop into an interactive shell for inspecting the models.
import IPython; IPython.embed()
