# encoding=utf-8
from typing import Callable, Union
from nni.common.types import SCHEDULER
import torch
import torch.nn.functional as F
from torch.optim import SGD
from nni_assets.compression.mnist_model import TorchModel, trainer, evaluator, device, train_loader, test_loader
import nni
import time
from nni.compression.quantization import QATQuantizer
from nni.compression.utils import TorchEvaluator

# define the model
model = TorchModel().to(device)

# show the original (un-wrapped) model structure; note that the quantizer will
# later wrap these layers with quantization modules.
print(model)

# define the optimizer and criterion for pre-training
optimizer = SGD(model.parameters(), 1e-2)  # lr=1e-2, no momentum for pre-training
criterion = F.nll_loss

# pre-train and evaluate the model on MNIST for 5 epochs so QAT starts from a
# reasonably converged model.
for epoch in range(5):
    trainer(model, optimizer, criterion)
    evaluator(model)

# quantization
"""
    这里的training_step和training_model实际上是配套的，这里使用原先的框架示例代码进行简化
    所以仅声明不使用
"""


def training_step(batch, model) -> torch.Tensor:
    """Run one forward pass and return the NLL loss for a single batch.

    Declared to satisfy TorchEvaluator's interface; ``batch`` is expected to be
    an ``(inputs, labels)`` pair as yielded by the MNIST data loader.
    """
    inputs = batch[0].to(device)
    targets = batch[1].to(device)
    predictions = model(inputs)
    return F.nll_loss(predictions, targets)


def training_model(model, optimizer, training_step, scheduler=None, max_steps=None, max_epochs=None):
    """Simplified training loop handed to TorchEvaluator.

    ``training_step``, ``scheduler`` and ``max_steps`` are accepted only to
    match TorchEvaluator's expected signature; the actual per-epoch training is
    delegated to the module-level ``trainer`` with the module-level ``criterion``.

    Epoch count: an explicit truthy ``max_epochs`` wins; otherwise run a single
    epoch unless ``max_steps`` was given, in which case run up to 100 epochs
    (the step limit is expected to stop training first).
    """
    if max_epochs:
        epochs = max_epochs
    elif max_steps is None:
        epochs = 1
    else:
        epochs = 100

    for epoch in range(epochs):
        print(f'Epoch {epoch} start!')
        trainer(model, optimizer, criterion)


# re-create the optimizer via nni.trace so NNI records the constructor
# arguments and can rebuild the optimizer during compression.
optimizer = nni.trace(SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
# evaluator wrapper that QATQuantizer drives during quantization-aware training
_evaluator = TorchEvaluator(training_model, optimizer, training_step)  # type: ignore

# Quantization settings: int8 affine fake-quantization applied to the
# conv/fc layers (inputs, weights and outputs) and to the ReLU outputs.
_int8_affine = {
    'quant_dtype': 'int8',
    'quant_scheme': 'affine',
    'granularity': 'default',
}
config_list = [
    dict(op_names=['conv1', 'conv2', 'fc1', 'fc2'],
         target_names=['_input_', 'weight', '_output_'],
         **_int8_affine),
    dict(op_names=['relu1', 'relu2', 'relu3'],
         target_names=['_output_'],
         **_int8_affine),
]

# create the QAT quantizer; the last argument is len(train_loader) —
# presumably the number of warm-up steps before fake-quant activates (one full
# epoch here) — confirm against the NNI QATQuantizer docs.
quantizer = QATQuantizer(model, config_list, _evaluator, len(train_loader))
# trace the model once with a real batch so the quantizer can locate the
# input/output tensors it needs to hook.
real_input = next(iter(train_loader))[0].to(device)
quantizer.track_forward(real_input)

# baseline evaluation (accuracy + wall-clock time) before quantization-aware training
start = time.time()
evaluator(model)
print(f'before quantization evaluating: {time.time() - start}s')

# run quantization-aware training for 5 epochs; returns the compressed model
# and the calibration parameters (scale / zero-point per quantized tensor).
start = time.time()
_, calibration_config = quantizer.compress(None, max_epochs=5)
print(f'pure training 5 epochs: {time.time() - start}s')

"""
defaultdict(<class 'dict'>, {'fc1': {'weight': {'scale': tensor(0.0015), 'zero_point': tensor(-4.), 
'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 'tracked_max': tensor(0.1945), 
'tracked_min': tensor(-0.1827)}, '_input_0': {'scale': tensor(0.0914), 'zero_point': tensor(-127.), 
'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 'tracked_max': tensor(23.2061), 
'tracked_min': tensor(0.)}, '_output_0': {'scale': tensor(0.1562), 'zero_point': tensor(6.), 
'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 'tracked_max': tensor(18.9741), 
'tracked_min': tensor(-20.6961)}}, 'conv1': {'weight': {'scale': tensor(0.0037), 
'zero_point': tensor(-39.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(0.6097), 'tracked_min': tensor(-0.3233)}, '_input_0': {'scale': tensor(0.0128), 
'zero_point': tensor(-94.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(2.8215), 'tracked_min': tensor(-0.4242)}, '_output_0': {'scale': tensor(0.0747), 
'zero_point': tensor(-17.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(10.7354), 'tracked_min': tensor(-8.2418)}}, 'conv2': {'weight': {'scale': tensor(0.0022), 
'zero_point': tensor(-16.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(0.3154), 'tracked_min': tensor(-0.2443)}, '_input_0': {'scale': tensor(0.0420), 
'zero_point': tensor(-127.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(10.6718), 'tracked_min': tensor(0.)}, '_output_0': {'scale': tensor(0.1738), 
'zero_point': tensor(-10.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(23.7782), 'tracked_min': tensor(-20.3621)}}, 'fc2': {'weight': {'scale': tensor(0.0016), 
'zero_point': tensor(-17.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(0.2362), 'tracked_min': tensor(-0.1793)}, '_input_0': {'scale': tensor(0.0719), 
'zero_point': tensor(-127.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(18.2665), 'tracked_min': tensor(0.)}, '_output_0': {'scale': tensor(0.1006), 
'zero_point': tensor(-32.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(16.0239), 'tracked_min': tensor(-9.5366)}}, 'relu1': {'_output_0': {'scale': tensor(0.0421), 
'zero_point': tensor(-127.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(10.6940), 'tracked_min': tensor(0.)}}, 'relu2': {'_output_0': {'scale': tensor(0.0923), 
'zero_point': tensor(-127.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(23.4486), 'tracked_min': tensor(0.)}}, 'relu3': {'_output_0': {'scale': tensor(0.0729), 
'zero_point': tensor(-127.), 'quant_dtype': 'int8', 'quant_scheme': 'affine', 'quant_bits': 8, 
'tracked_max': tensor(18.5128), 'tracked_min': tensor(0.)}}})
"""
print(calibration_config)
start = time.time()
evaluator(model)
print(f'quantization evaluating: {time.time() - start}s')
