# Standard library
import os
import pickle
import sys
from pathlib import Path
from typing import Iterable, Tuple

# Third-party
import numpy as np
import torch
from torch.optim import lr_scheduler
from tqdm import tqdm

# Project-local
from ppq.executor import TorchExecutor
from ppq.IR import BaseGraph, TrainableGraph
from utils.callbacks import Callbacks
from utils.general import (LOGGER, colorstr, non_max_suppression, one_cycle,
                           scale_coords, xywh2xyxy)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import time_sync, smart_optimizer
from val import process_batch

# Resolve the YOLOv5 root directory and make it importable, then re-express
# ROOT relative to the current working directory (shorter paths in logs).
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative


class ImageNetTrainer():
    """Network trainer for a quantized YOLOv5 detection graph (PPQ).

    NOTE(review): the class name is kept for interface compatibility, but
    this trainer actually drives a YOLOv5-style *detection* graph: epoch()
    and step() consume a YOLOv5 ``compute_loss`` callable, and eval() runs
    a YOLOv5 mAP evaluation.

    Member functions:
    1. epoch(): run one training epoch.
    2. step():  run one training step.
    3. eval():  run mAP evaluation on a validation dataloader.
    4. save():  export the trained graph to disk.
    5. clear(): release training state (gradients).

    PPQ wraps the graph in a TrainableGraph, a wrapper class exposing the
    graph's trainable tensors. The optimizer controls the learning process
    and determines the parameter values the graph ends up learning; tune
    the learning rate carefully, it strongly affects the training result.
    """

    def __init__(
            self, graph: BaseGraph, model: torch.nn.Module, device: str = 'cuda') -> None:
        """Build executor, trainable wrapper and optimizer for `graph`.

        Args:
            graph: PPQ graph to be trained.
            model: accepted for interface compatibility; training operates
                on the PPQ graph, not on this torch module.
            device: device string used by the executor and the loss.
        """
        self._epoch = 0
        self._step = 0
        self._best_metric = 0
        self._device = device
        self._loss_fn = torch.nn.CrossEntropyLoss().to(device)
        self._executor = TorchExecutor(graph, device=device)
        self._training_graph = TrainableGraph(graph)
        self.graph = graph

        # Enable gradients on every trainable tensor of the wrapped graph.
        for tensor in self._training_graph.parameters():
            tensor.requires_grad = True

        self._optimizer = torch.optim.Adam(
            params=self._training_graph.parameters(), lr=3e-5)
        # No scheduler by default; assign one (e.g. built from one_cycle)
        # before training to enable learning-rate scheduling.
        self._lr_scheduler = None

    def epoch(self, dataloader: Iterable, compute_loss) -> float:
        """Do one epoch of training with the given dataloader.

        The dataloader is expected to be an iterable of batched data whose
        first two entries are [img(torch.Tensor), label(torch.Tensor)];
        extra entries (paths, shapes) are ignored. If your data has another
        layout, rewrite this function.

        Returns:
            Sum of per-step losses over the epoch.
        """
        epoch_loss = 0.0
        for bidx, batch in enumerate(tqdm(dataloader, desc=f'Epoch {self._epoch}: ', total=len(dataloader))):
            if type(batch) not in {tuple, list}:
                raise TypeError('Feeding Data is invalid, expect a Tuple or List like [data, label], '
                                f'however {type(batch)} was given. To feed customized data, you have to rewrite '
                                '"epoch" function of PPQ Trainer.')
            # Only the first two entries are needed for training; YOLOv5
            # loaders may append paths/shapes after them.
            data, label = batch[0], batch[1]
            data, label = data.to(self._device), label.to(self._device)
            _, loss = self.step(data, label, True, compute_loss)
            epoch_loss += loss

        self._epoch += 1
        return epoch_loss

    def step(self, data: torch.Tensor, label: torch.Tensor, training: bool, compute_loss) -> Tuple[torch.Tensor, float]:
        """Do one step of training (or plain inference) on a single batch.

        This one-step-forward function assumes the model has a single input
        variable; rewrite it for multi-input models.

        Args:
            data: input batch, already on the training device.
            label: YOLOv5 target tensor consumed by `compute_loss`.
            training: when True, run backward + optimizer step; when False,
                run a gradient-free forward only.
            compute_loss: YOLOv5 loss callable.

        Returns:
            (prediction, loss value) — loss is 0.0 when not training.
        """
        if training:
            # forward_with_gradient returns [detect_out, *feature_maps];
            # compute_loss consumes the per-scale feature maps, hence [1:].
            # NOTE(review): assumes the executor yields 4 outputs — confirm
            # against the exported graph.
            pred = self._executor.forward_with_gradient(data)[1:]
            loss, _loss_items = compute_loss(pred, label)  # loss scaled by batch_size
            loss.backward()
            self._optimizer.step()
            if self._lr_scheduler is not None:
                # Scheduler is stepped after the optimizer (required order
                # since PyTorch 1.1).
                self._lr_scheduler.step(epoch=self._epoch)
            self._training_graph.zero_grad()

            self._step += 1
            return pred, loss.item()

        # Evaluation path (the old code silently returned None here).
        with torch.no_grad():
            pred = self._executor.forward(data)
        return pred, 0.0

    def eval(self, names, dataloader: Iterable, compute_loss) -> float:
        """Run a YOLOv5-style mAP evaluation over `dataloader`.

        Split your dataset into training and evaluation parts first, then
        use this function to monitor model performance on the evaluation
        dataset.

        Args:
            names: sequence of class names indexed by class id.
            dataloader: yields (images, targets, paths, shapes) batches,
                the layout produced by the YOLOv5 val dataloader.
            compute_loss: YOLOv5 loss callable, or falsy to skip the
                validation-loss computation.

        Returns:
            (mp, mr, map50, map, box_loss, obj_loss, cls_loss)
        """
        device = 'cuda'
        # --- evaluation settings, hoisted out of the batch loop.  The old
        # --- code rebuilt them per batch and the trailing commas turned
        # --- several of them into 1-tuples (making e.g. single_cls truthy).
        names = dict(enumerate(names))  # class-id -> class-name lookup
        nc = len(names)                 # number of classes
        conf_thres = 0.001              # NMS confidence threshold
        iou_thres = 0.6                 # NMS IoU threshold
        single_cls = False              # treat as single-class dataset
        verbose = False                 # per-class result printout
        save_txt = False                # save results to *.txt
        save_hybrid = False             # feed labels into NMS (autolabelling)
        half = True                     # FP16 image normalisation
        plots = True                    # plot first batches + PR curves
        save_dir = Path('')             # output directory for plots
        batch_size = 1                  # reported in the speed summary only
        imgsz = 640                     # reported in the speed summary only

        # --- accumulators; these must survive across batches (the old code
        # --- reset them every iteration, so metrics covered only the last
        # --- batch).
        iouv = torch.linspace(0.5, 0.95, 10, device=device)  # IoU vector for mAP@0.5:0.95
        niou = iouv.numel()
        confusion_matrix = ConfusionMatrix(nc=nc)
        seen = 0
        dt = [0.0, 0.0, 0.0]            # pre-process / inference / NMS time
        mp, mr, map50, map = 0.0, 0.0, 0.0, 0.0
        loss = torch.zeros(3, device=device)  # box, obj, cls
        stats, ap, ap_class = [], [], []

        s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
        with torch.no_grad():
            for bidx, batch in enumerate(tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')):
                if type(batch) not in {tuple, list}:
                    raise TypeError('Feeding Data is invalid, expect a Tuple or List like [data, label], '
                                    f'however {type(batch)} was given. To feed customized data, you have to rewrite '
                                    '"eval" function of PPQ Trainer.')
                data, targets, paths, shapes = batch
                data, targets = data.cuda(), targets.cuda()

                t1 = time_sync()
                # NOTE(review): inference below runs on the raw `data`
                # tensor while `im` (the normalised copy) is used only for
                # shapes and plotting — confirm the exported graph expects
                # un-normalised input.
                im = data.half() if half else data.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                nb, _, height, width = im.shape  # batch size, channels, height, width
                t2 = time_sync()
                dt[0] += t2 - t1

                # Inference on the quantized graph.
                tmp = self._executor.forward(data)
                out = tmp[0]          # detect-head output
                train_out = tmp[1:]   # per-scale feature maps (for the loss)
                dt[1] += time_sync() - t2

                # Validation loss (box, obj, cls).
                if compute_loss:
                    loss += compute_loss([x.float() for x in train_out], targets)[1]

                # NMS — turns the raw head output into per-image (n, 6)
                # detections [xyxy, conf, cls], the layout the metrics code
                # below indexes into (pred[:, 4] = conf, pred[:, 5] = cls).
                targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
                lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
                t3 = time_sync()
                out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
                dt[2] += time_sync() - t3

                # Per-image metrics.
                for si, pred in enumerate(out):
                    labels = targets[targets[:, 0] == si, 1:]
                    nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
                    path, shape = Path(paths[si]), shapes[si][0]
                    correct = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
                    seen += 1

                    if npr == 0:
                        if nl:
                            stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0]))
                            if plots:
                                confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                        continue

                    # Predictions, rescaled to native (original-image) space.
                    if single_cls:
                        pred[:, 5] = 0
                    predn = pred.clone()
                    scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

                    # Evaluate against native-space labels.
                    if nl:
                        tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                        scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                        labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                        correct = process_batch(predn, labelsn, iouv)
                    stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0]))  # (correct, conf, pcls, tcls)

                # Plot the first three batches.
                if plots and bidx < 3:
                    plot_images(im, targets, paths, save_dir / f'val_batch{bidx}_labels.jpg', names)  # labels
                    plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{bidx}_pred.jpg',
                                names)  # pred

            # Aggregate metrics over the whole dataset.
            stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)]  # to numpy
            if len(stats) and stats[0].any():
                tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
                ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
                mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
            # Guard against an empty stats list (no labels, no predictions).
            nt = np.bincount(stats[3].astype(int), minlength=nc) if len(stats) else np.zeros(1)  # targets per class

            # Print results.
            pf = '%20s' + '%11i' * 2 + '%11.3g' * 4  # print format
            LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
            if (verbose or nc < 50) and nc > 1 and len(stats):
                for i, c in enumerate(ap_class):
                    LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

            t = tuple(x / seen * 1E3 for x in dt) if seen else (0.0, 0.0, 0.0)  # speeds per image
            shape = (batch_size, 3, imgsz, imgsz)
            LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
            s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
            LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
            # TODO(review): the hard-coded 32 divisor presumably normalises
            # the summed loss by the number of validation batches — confirm.
            result = (mp, mr, map50, map, *(loss.cpu() / 32).tolist())
        return result

    def save(self, file_path: str):
        """Save the trained graph to `file_path`.

        The saved model can be read back by ppq.api.load_native_model.
        """
        from ppq.parser import NativeExporter
        exporter = NativeExporter()
        exporter.export(file_path=file_path, graph=self.graph)

    def clear(self):
        """Clear training state: disable and release parameter gradients."""
        for tensor in self._training_graph.parameters():
            tensor.requires_grad = False
            # Use the public .grad attribute (the old code poked private
            # tensor._grad).
            tensor.grad = None
