import numpy as np
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
import os
import time
import sys
import torch.quantization

from utils import *

# Configure warning filters: silence DeprecationWarning from every module,
# but restore default reporting for anything raised by torch.quantization,
# so quantization-API deprecation notices are still visible.
import warnings

warnings.filterwarnings(action='ignore', category=DeprecationWarning, module=r'.*')
warnings.filterwarnings(action='default', module=r'torch.quantization')

# Fix the RNG seed so repeated runs produce identical results.
torch.manual_seed(42)

if __name__ == '__main__':
    # Paths: dataset root, plus the directory holding the pre-trained float
    # model and the TorchScript artifacts this script writes out.
    data_path = '../datasets/imagenet_1k'
    saved_model_dir = '../models/'
    float_model_file = 'mobilenet_pretrained_float.pth'
    scripted_float_model_file = 'mobilenet_quantization_scripted.pth'
    scripted_quantized_model_file = 'mobilenet_quantization_scripted_quantized.pth'

    # Batch size 1 for both loaders; evaluation therefore covers
    # num_eval_batches * eval_batch_size = 100 images.
    train_batch_size = 1
    eval_batch_size = 1
    num_eval_batches = 100

    # Source model
    # NOTE(review): prepare_data_loaders / load_model / evaluate /
    # print_size_of_model come from the `from utils import *` above —
    # their exact semantics are defined there.
    data_loader, data_loader_test = prepare_data_loaders(data_path, train_batch_size, eval_batch_size)
    criterion = nn.CrossEntropyLoss()
    # Keep the float baseline on CPU; the quantized comparisons below run on CPU.
    float_model = load_model(saved_model_dir + float_model_file).to('cpu')
    print("\nSource model", end=" ")
    print_size_of_model(float_model)
    # eval() mode: disables dropout and uses BatchNorm running statistics.
    float_model.eval()
    # top1/top5 are presumably accuracy meters with an .avg field — see utils.
    top1, top5 = evaluate(float_model, criterion, data_loader_test, neval_batches=num_eval_batches)
    print('Evaluation accuracy on %d images, %2.2f' % (num_eval_batches * eval_batch_size, top1.avg))

    # Fuses modules: Conv+BN+Relu and Conv+Relu
    # Fusion folds BatchNorm (and ReLU) into the preceding Conv. The model
    # must be in eval() mode for fusion to be valid.
    print()
    print("="*25, "Fuses Modules", "="*25)
    print('Inverted Residual Block: Before fusion \n', float_model.features[1].conv)
    float_model.eval()
    # fuse_model() is assumed to be defined on the model class loaded by
    # load_model (see utils); it fuses Conv+BN(+ReLU) sequences in place.
    float_model.fuse_model()
    print('\nInverted Residual Block: After fusion\n', float_model.features[1].conv)

    # Baseline model
    # Re-evaluate after fusion: fusion alone should not change accuracy, so
    # this is the fair float baseline for the quantized numbers below.
    print("\nBaseline model", end=" ")
    print_size_of_model(float_model)
    top1, top5 = evaluate(float_model, criterion, data_loader_test, neval_batches=num_eval_batches)
    print('Evaluation accuracy on %d images, %2.2f' % (num_eval_batches * eval_batch_size, top1.avg))
    # Save the fused float model as TorchScript for later comparison/deployment.
    torch.jit.save(torch.jit.script(float_model), saved_model_dir + scripted_float_model_file)

    # Post-training static quantization
    print()
    print("=" * 25, "Post-training static quantization", "=" * 25)
    # A single batch of size 1 is used to calibrate the observers below.
    num_calibration_batches = 1
    # Specify quantization configuration
    # Start with simple min/max range estimation and per-tensor quantization of weights
    float_model.qconfig = torch.quantization.default_qconfig
    print(float_model.qconfig)
    # prepare() inserts observer modules that record activation ranges during
    # the calibration pass; inplace=True mutates float_model directly.
    torch.quantization.prepare(float_model, inplace=True)
    # Calibrate first
    print('Post Training Quantization Prepare: Inserting Observers')
    print('Inverted Residual Block:After observer insertion \n', float_model.features[1].conv)
    # Calibrate with the training set
    # (evaluate is used purely to drive data through the observers; its
    # return value is intentionally discarded here.)
    evaluate(float_model, criterion, data_loader, neval_batches=num_calibration_batches)
    print('Post Training Quantization: Calibration done')
    # Convert to quantized model
    # convert() swaps observed modules for quantized implementations using
    # the ranges gathered above.
    torch.quantization.convert(float_model, inplace=True)
    print('Post Training Quantization: Convert done')
    print('\n Inverted Residual Block: After fusion and quantization, note fused modules: \n\n',
          float_model.features[1].conv)

    # Quantified model
    # Size and accuracy of the int8 model, for comparison with the baseline.
    print("\nQuantified model", end=" ")
    print_size_of_model(float_model)
    top1, top5 = evaluate(float_model, criterion, data_loader_test, neval_batches=num_eval_batches)
    print('Evaluation accuracy on %d images, %2.2f' % (num_eval_batches * eval_batch_size, top1.avg))

    # Improved quantization: the 'fbgemm' qconfig uses per-channel weight
    # quantization and histogram-based activation observers, which typically
    # recovers accuracy lost by the per-tensor default config used above.
    # Start from a fresh float model because float_model was converted in place.
    per_channel_quantized_model = load_model(saved_model_dir + float_model_file)
    per_channel_quantized_model.eval()
    per_channel_quantized_model.fuse_model()
    per_channel_quantized_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
    print(per_channel_quantized_model.qconfig)

    torch.quantization.prepare(per_channel_quantized_model, inplace=True)
    # Calibrate on the training loader. Pass neval_batches as a keyword for
    # consistency with every other evaluate() call in this script.
    evaluate(per_channel_quantized_model, criterion, data_loader, neval_batches=num_calibration_batches)
    torch.quantization.convert(per_channel_quantized_model, inplace=True)
    top1, top5 = evaluate(per_channel_quantized_model, criterion, data_loader_test, neval_batches=num_eval_batches)
    print('Evaluation accuracy on %d images, %2.2f' % (num_eval_batches * eval_batch_size, top1.avg))
    torch.jit.save(torch.jit.script(per_channel_quantized_model), saved_model_dir + scripted_quantized_model_file)

    # Export a traced TorchScript module. A zero tensor with MobileNet's
    # expected input shape (1, 3, 224, 224) serves as the trace example.
    # ('example_input' avoids shadowing the built-in name `input`.)
    example_input = torch.zeros((1, 3, 224, 224))
    traced_model = torch.jit.trace(per_channel_quantized_model, example_input)
    traced_model.save(saved_model_dir + "module.torchscript")

