
import argparse
from datetime import datetime
from multiprocessing import freeze_support
from pathlib import Path

import numpy as np
import torch
import torch.distributed as dist
import yaml
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from tqdm import tqdm

import sys

from utils.dataloaders import create_dataloader
from utils.general import colorstr

sys.path.append("C:/Users/cyc/Desktop/ppq/ppq/ppq/samples/Imagenet")

from trainer import ImageNetTrainer
from ppq.api import ENABLE_CUDA_KERNEL, export_ppq_graph
import ppq.lib as PFL
import os

import ppq.lib as PFL
import torch
import torchvision
from ppq.api import ENABLE_CUDA_KERNEL, load_native_graph, load_torch_model, load_onnx_graph
from ppq.core import TargetPlatform
from ppq.executor import TorchExecutor
from ppq.quantization.optim import *
from ppq.quantization.quantizer import TensorRTQuantizer, FPGAQuantizer
from ppq.samples.Imagenet.Utilities.Imagenet import *  # check ppq.samples.imagenet.Utilities
from ppq.samples.Imagenet.Utilities.Imagenet.imagenet_util import \
    load_imagenet_from_directory  # check ppq.samples.imagenet.Utilities
from train2 import main
from ppq.IR import GraphFormatter
from yolo_onnx_trainer import ImageNetTrainer
from ppq.samples.QuantZoo.Data import load_coco_detection_dataset

if __name__ == '__main__':

    """
        使用这个脚本来尝试在 Imagenet 数据集上执行量化感知训练
            使用 imagenet 中的数据测试量化精度与 calibration
            默认的 imagenet 数据集位置:Assets/Imagenet_Train, Assets/Imagenet_Valid
            你可以通过软连接创建它们:
                ln -s /home/data/Imagenet/val Assets/Imagenet_Valid
                ln -s /home/data/Imagenet/train Assets/Imagenet_Train
    """
    # freeze_support()
    CFG_DEVICE = 'cuda'  # 一个神奇的字符串，用来确定执行设备
    CFG_BATCHSIZE = 32  # 测试与calib时的 batchsize
    CFG_INPUT_SHAPE = (CFG_BATCHSIZE, 3, 640, 640)  # 用来确定模型输入的尺寸，好像 imagenet 都是这个尺寸
    CFG_VALIDATION_DIR = 'D:\\ppq_reload\\yolov5\\datasets\\coco128\\images\\train2017'  # 用来读取 validation dataset
    CFG_TRAIN_DIR = ''  # 用来读取 train dataset，注意该集合将被用来 calibrate 你的模型
    CFG_PLATFORM = TargetPlatform.FPGA_INT8  # 用来指定目标平台

    # ------------------------------------------------------------
    # This example shows how to run quantization-aware training in PPQ.
    # You can run a normal supervised training loop with labelled data, or a
    # distillation-style label-free one. Training a PPQ model follows the same
    # logic as PyTorch, so the usual PyTorch training tricks apply.
    # ------------------------------------------------------------

    graph = load_onnx_graph("best_1024.onnx")
    editor = GraphFormatter(graph)

    # ------------------------------------------------------------
    # Run the standard quantization flow first: initialize quantization
    # information for all operators, then perform calibration.
    # ------------------------------------------------------------
    quantizer = FPGAQuantizer(graph=graph)
    dispatching_table = PFL.Dispatcher(graph=graph).dispatch(quantizer.quant_operation_types)

    # Initialize quantization info for each operator.
    # NOTE(review): dispatching_table is computed but never used — every op is
    # assigned platform=TargetPlatform.FP32 here. The usual PPQ pattern is
    # platform=dispatching_table[op.name]; confirm FP32-for-all is intentional.
    for op in graph.operations.values():
        quantizer.quantize_operation(
            op_name=op.name, platform=TargetPlatform.FP32)

    executor = TorchExecutor(graph=graph)
    # NOTE(review): tracing shape is hard-coded to batch 1 instead of
    # CFG_INPUT_SHAPE — confirm which shape the deployed model expects.
    executor.tracing_operation_meta(inputs=torch.zeros([1, 3, 640, 640]).cuda())

    # with ENABLE_CUDA_KERNEL():
    # ------------------------------------------------------------
    # Build the calibration loader and the YOLOv5-style train/val loaders.
    # Since the model will be trained further afterwards, ParameterBakingPass()
    # must NOT be used later — once weights are baked they cannot be adjusted.
    # ------------------------------------------------------------
    calib_loader = load_coco_detection_dataset(
        data_dir="D:\\ppq_reload\\yolov5\\ppq\\samples\\QuantZoo\\Coco\\Calib",
        batchsize=1)

    train_path = "D:\\datasets\\coco128\\images\\train2017"
    imgsz = 640
    gs = 32  # grid size (maximum model stride)
    single_cls = False
    # YOLOv5 default training hyper-parameters.
    hyp = {'lr0': 0.01, 'lrf': 0.01, 'momentum': 0.937, 'weight_decay': 0.0005, 'warmup_epochs': 3.0,
           'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1, 'box': 0.05, 'cls': 0.5, 'cls_pw': 1.0, 'obj': 1.0,
           'obj_pw': 1.0, 'iou_t': 0.2, 'anchor_t': 4.0, 'fl_gamma': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7, 'hsv_v': 0.4,
           'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0, 'perspective': 0.0, 'flipud': 0.0,
           'fliplr': 0.5, 'mosaic': 1.0, 'mixup': 0.0, 'copy_paste': 0.0}
    # Options mirroring yolov5's argparse namespace. `hyp` previously repeated
    # the whole dict literal above verbatim; pass a copy instead so the two
    # stay in sync while mutations of opt.hyp cannot leak into `hyp`.
    opt = argparse.Namespace(artifact_alias='latest', batch_size=1, bbox_interval=-1, bucket='', cache=None, cfg='',
                             cos_lr=False, data='data\\coco128.yaml', device='', entity=None, epochs=10, evolve=None,
                             exist_ok=False, freeze=[0],
                             hyp=dict(hyp), image_weights=False,
                             imgsz=640, label_smoothing=0.0, local_rank=-1, multi_scale=False, name='exp',
                             noautoanchor=False, noplots=False, nosave=False, noval=False, optimizer='SGD',
                             patience=100, project='runs\\train', quad=False, rect=False, resume=False,
                             save_dir='runs\\train\\exp253', save_period=-1, seed=0, single_cls=False, sync_bn=False,
                             upload_dataset=False, weights='yolov5s.pt', workers=0)
    LOCAL_RANK = -1
    workers = 1
    train_loader, dataset = create_dataloader(train_path,
                                              imgsz,
                                              1,  # train batch size
                                              gs,
                                              single_cls,
                                              hyp=hyp,
                                              augment=True,
                                              cache=None if opt.cache == 'val' else opt.cache,
                                              rect=opt.rect,
                                              rank=LOCAL_RANK,
                                              workers=workers,
                                              image_weights=opt.image_weights,
                                              quad=opt.quad,
                                              prefix=colorstr('train: '),
                                              shuffle=True)
    train_dataloader = DataLoader(dataset=dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=0,
                                  pin_memory=False,
                                  # The ONNX model does not support a dynamic batch size;
                                  # the last batch may be smaller, so it is dropped.
                                  drop_last=True)
    val_path = 'D:\\datasets\\coco128\\images\\train2017'
    val_loader = create_dataloader(val_path,
                                   imgsz,
                                   2,  # val batch size (was the constant expression `1 // 1 * 2`)
                                   gs,
                                   single_cls,
                                   hyp=hyp,
                                   cache=opt.cache,  # simplified from `None if False else opt.cache`
                                   rect=True,
                                   rank=-1,
                                   workers=workers,
                                   pad=0,
                                   prefix=colorstr('val: '))[0]

    # Standard PPQ optimization pipeline: graph simplify, quant fusion,
    # parameter quantization, runtime calibration (KL method), passive
    # parameter quantization and quant alignment. LearnedStepSizePass is
    # intentionally disabled here.
    pipeline = PFL.Pipeline([
        QuantizeSimplifyPass(),
        QuantizeFusionPass(activation_type=quantizer.activation_fusion_types),
        ParameterQuantizePass(),
        RuntimeCalibrationPass(method='kl'),
        PassiveParameterQuantizePass(),
        QuantAlignmentPass(),
        # LearnedStepSizePass(steps=500, block_size=5)
    ])

    # quantizer.quantize()
    # Calibrate over 8 batches; collate_fn moves each batch's first element
    # (presumably the image tensor — confirm against the calib loader) to GPU.
    pipeline.optimize(
        calib_steps=8, collate_fn=lambda x: x[0].cuda(),
        graph=graph, dataloader=calib_loader, executor=executor)

    # ------------------------------------------------------------
    # Quantization is finished; now start the QAT training. Keep in mind:
    #
    # 1. Do not start QAT from scratch — first train a float model to
    #    convergence, or finetune from a pretrained model.
    # 2. The standard quantization flow above must be completed first.
    # 3. PPQ's Executor behaves much like a PyTorch Module; single-machine
    #    training is straightforward, but multi-GPU training is not supported.
    #
    # The training code is wrapped inside ImageNetTrainer;
    # open it to see the actual training logic.
    # ------------------------------------------------------------
    trainer = ImageNetTrainer(graph=graph, model="")
    best_acc = 0
    for epoch in range(20):
        trainer.epoch(train_loader, "")
        current_acc = trainer.eval(val_loader, "")
        # Track the running best. Previously best_acc was never updated, so the
        # checkpoint was overwritten on every epoch with accuracy > 0 — the
        # final 'Best.native' was the LAST epoch, not the best one.
        if current_acc > best_acc:
            best_acc = current_acc
            trainer.save('Best.native')

    # Reload the best checkpoint and export graph + quantization config.
    graph = load_native_graph(import_file='Best.native')
    PFL.Exporter(platform=TargetPlatform.ONNXRUNTIME).export(
        file_path='export.onnx', graph=graph, config_path='export.json')
