from __future__ import division, print_function
import os
import sys
# Make the script's directory and its grandparent importable.
# BUG FIX: the original passed the string literal "__file__" to
# os.path.dirname, which always yields '' — the intended directories were
# never actually added. Use the __file__ variable instead.
sys.path.append(os.path.dirname(__file__))
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
import argparse
import time
import random
import numpy as np
from inspect import isfunction
from typing import Dict
from tqdm import tqdm
import paddle
import paddle.vision.models as models
from paddle.io import Dataset, DataLoader

from paddle.quantization import QuantConfig, PTQ
from paddleslim.quant.observers import (
    HistObserver, KLObserver, EMDObserver, MSEObserver, AVGObserver,
    MSEChannelWiseWeightObserver, AbsMaxChannelWiseWeightObserver
)

from utils.dataloader.imagenet_reader import ImageNetDataset

# Registry of supported model constructors: every function exposed by
# paddle.vision.models that accepts a `pretrained` argument.
SUPPORT_MODELS: Dict[str, callable] = {
    model_name: model_fn
    for model_name, model_fn in models.__dict__.items()
    if isfunction(model_fn) and 'pretrained' in model_fn.__code__.co_varnames
}

# Lookup tables mapping CLI option strings to the observer classes they select.
ACTIVATION_OBSERVERS = dict(
    hist=HistObserver,
    kl=KLObserver,
    emd=EMDObserver,
    mse=MSEObserver,
    avg=AVGObserver,
)
WEIGHT_OBSERVERS = dict(
    mse_channel_wise=MSEChannelWiseWeightObserver,
    abs_max_channel_wise=AbsMaxChannelWiseWeightObserver,
)

def evaluate(model, dataset):
    """Evaluate ``model`` on ``dataset`` and return mean top-1/top-5 accuracy.

    Args:
        model: A paddle model (callable on an image batch, returns logits).
        dataset: A ``paddle.io.Dataset`` yielding ``(image, label)`` pairs.

    Returns:
        Tuple ``(top1, top5)`` of floats — mean accuracies over all samples.
    """
    loader = DataLoader(dataset, batch_size=1)
    model.eval()
    acc_top1_list, acc_top5_list = [], []
    # Disable gradient tracking: evaluation needs no autograd graph, and
    # building one per forward pass wastes time and memory in dygraph mode.
    with paddle.no_grad():
        for image, label in tqdm(loader(), desc="Evaluating"):
            preds = model(image)
            acc1 = paddle.metric.accuracy(preds, label, k=1)
            acc5 = paddle.metric.accuracy(preds, label, k=5)
            acc_top1_list.append(float(acc1.numpy()))
            acc_top5_list.append(float(acc5.numpy()))
    return float(np.mean(acc_top1_list)), float(np.mean(acc_top5_list))

def calibrate(model, dataset, batch_num, batch_size, num_workers=1):
    """Run forward passes so the inserted observers collect statistics.

    Args:
        model: The quantization-prepared model (observers inserted).
        dataset: Calibration dataset yielding ``(image, label)`` pairs.
        batch_num: Maximum number of batches to feed; a value <= 0 means
            run over the entire dataset.
        batch_size: Batch size for the calibration dataloader.
        num_workers: Dataloader worker count.
    """
    loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
    # Calibration only needs forward activations — no gradients.
    with paddle.no_grad():
        for idx, (images, _) in enumerate(tqdm(loader(), total=batch_num, desc="Calibrating")):
            model(images)
            # Stop after batch_num batches (batch_num <= 0 disables the cap).
            if 0 < batch_num <= idx + 1:
                break

def main(args):
    """Run the PTQ pipeline: load model, measure FP32 accuracy, calibrate,
    quantize, re-measure accuracy, and export the int8 inference model.

    Args:
        args: Parsed command-line namespace (see ``parse_args``).
    """
    if args.ce_test:
        # Fixed seeds so CE (continuous evaluation) runs are reproducible.
        seed = 111
        paddle.seed(seed)
        np.random.seed(seed)
        random.seed(seed)

    # Load the model. Skip downloading the ImageNet-pretrained weights when a
    # local checkpoint is supplied — set_state_dict overwrites them anyway.
    model_fn = SUPPORT_MODELS[args.model]
    model = model_fn(pretrained=not args.pretrain_weight)
    if args.pretrain_weight:
        state_dict = paddle.load(args.pretrain_weight)
        model.set_state_dict(state_dict)
        print(f'Loaded model weights from {args.pretrain_weight}')
    model.eval()

    # Dataset used for both calibration and evaluation.
    dataset = ImageNetDataset(args.data)

    # Configure the chosen activation/weight observers for Conv2D layers.
    activation_ob = ACTIVATION_OBSERVERS[args.activation_observer]()
    weight_ob = WEIGHT_OBSERVERS[args.weight_observer]()
    config = QuantConfig(weight=None, activation=None)
    config.add_type_config(paddle.nn.Conv2D, activation=activation_ob, weight=weight_ob)
    ptq = PTQ(config)

    # Baseline FP32 accuracy.
    base_top1, base_top5 = evaluate(model, dataset)
    print(f"\033[31mBaseline(FP32): top1/top5 = {base_top1 * 100:.2f}%/{base_top5 * 100:.2f}%\033[0m")

    # Insert observers, calibrate, then convert to the inference (int8) graph.
    quant_model = ptq.quantize(model)
    print("Start PTQ calibration for quantization")
    calibrate(quant_model, dataset, args.quant_batch_num, args.quant_batch_size, num_workers=args.num_workers)
    infer_model = ptq.convert(quant_model, inplace=True)

    # Quantized model accuracy.
    q_top1, q_top5 = evaluate(infer_model, dataset)
    print(f"\033[31mPTQ {args.activation_observer}/{args.weight_observer}: top1/top5 = {q_top1 * 100:.2f}%/{q_top5 * 100:.2f}%\033[0m")

    # Export a static-graph inference model (NCHW, 224x224 input).
    dummy_input = paddle.static.InputSpec(shape=[None, 3, 224, 224], dtype='float32')
    os.makedirs(args.output_dir, exist_ok=True)
    paddle.jit.save(infer_model, os.path.join(args.output_dir, "int8_infer"), [dummy_input])

def parse_args():
    """Build the CLI parser and return the parsed argument namespace."""
    p = argparse.ArgumentParser("Quantization on ImageNet")
    p.add_argument("--model", type=str, choices=SUPPORT_MODELS.keys(),
                   default='mobilenet_v1', help="Model name")
    p.add_argument("--pretrain_weight", type=str, default=None,
                   help="Pretrain weight path")
    p.add_argument("--output_dir", type=str, default='output',
                   help="Save directory")
    p.add_argument('--data', default="datasets/ILSVRC2012",
                   help='Dataset root directory')
    p.add_argument("--activation_observer", default='mse', type=str,
                   choices=ACTIVATION_OBSERVERS.keys(),
                   help="Activation observer")
    p.add_argument("--weight_observer", default='mse_channel_wise', type=str,
                   choices=WEIGHT_OBSERVERS.keys(),
                   help="Weight observer")
    p.add_argument("--quant_batch_num", default=10, type=int,
                   help="Batch num for quantization")
    p.add_argument("--quant_batch_size", default=10, type=int,
                   help="Batch size for quantization")
    p.add_argument("--num_workers", default=5, type=int,
                   help="Number of dataloader workers")
    p.add_argument('--ce_test', action='store_true',
                   help="Use CE test for seed and worker settings")
    return p.parse_args()

if __name__ == '__main__':
    cli_args = parse_args()
    # Echo the effective configuration before running.
    print("----------- Configuration Arguments -----------")
    for name, value in sorted(vars(cli_args).items()):
        print(f"{name}: {value}")
    print("------------------------------------------------")
    main(cli_args)