import argparse
import logging
import math
import os
import random
import time

import torch
import yaml
from torch import nn
import matplotlib

matplotlib.use('pdf')  # select a non-interactive backend before pyplot is first imported (headless training)
from utils.datasets import create_dataloader, mixup_data, mixup_criterion
from utils.general import increment_dir, labels_to_class_weights, labels_to_image_weights, plot_labels, set_logging, \
    plot_results
from utils.tools import estimate
from utils.torch_utils import init_seeds, cuda2cpu, select_device
from models import transfer
import torch.optim as optim
from torch.optim import lr_scheduler
from pathlib import Path
from tqdm import tqdm
import numpy as np
import torchvision.utils as vutils
import torchsummary
from tensorboardX import SummaryWriter

logger = logging.getLogger(__name__)


def train(hyp, opt, device, log_dir):
    """Train an image-classification model and log metrics to TensorBoard.

    Saves the run settings (hyp/opt yaml), a model summary / graph / ONNX
    export, per-epoch train and validation metrics (results.txt + scalars),
    and 'last.pt' / 'best.pt' weight checkpoints under ``log_dir``.

    Args:
        hyp: hyperparameter dict (expects 'lr0', 'lrf', 'momentum',
            'mixup', 'mixup_alpha', ...).
        opt: parsed argparse.Namespace (expects 'data', 'model_name',
            'epochs', 'img_size', 'image_weights', ...).
        device: torch.device to train on.
        log_dir: directory that receives weights, logs and plots.
    """
    log_dir = Path(log_dir)  # logging directory
    wdir = log_dir / 'weights'  # weights directory
    os.makedirs(wdir, exist_ok=True)
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = str(log_dir / 'results.txt')

    # Save run settings for reproducibility
    with open(log_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(log_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    init_seeds(2)
    summarywriter = SummaryWriter(log_dir)
    print("tensorboard --logdir={} --port=6006 --host=0.0.0.0".format(os.path.abspath(log_dir)))

    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict

    train_path = data_dict['train']
    val_path = data_dict['val']
    nc, names = int(data_dict['nc']), data_dict['names']  # number classes, names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    model = transfer.Model(opt.model_name, nc=nc, pretrained=True).to(device)  # create

    optimizer = optim.SGD(model.parameters(), lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    # NOTE: lf is indexed by EPOCH (x * pi / opt.epochs), so the scheduler is
    # stepped once per epoch below. The old per-batch step ran the cosine
    # schedule to its hyp['lrf'] floor within the first epoch.
    lf = lambda x: ((1 + math.cos(x * math.pi / opt.epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    best_fitness = 0  # lower validation loss is better; 0 means "unset"

    # Trainloader / valloader
    train_dataloader, train_datasets = create_dataloader(train_path, opt, hyp, data_dict)
    val_dataloader, val_datasets = create_dataloader(val_path, opt, None, data_dict, isTrain=False)

    # Log a model summary, graph and ONNX export using a throwaway CPU copy
    modelsummary = transfer.Model(opt.model_name, nc=nc, pretrained=True)  # create
    torchsummary.summary(modelsummary, input_size=(3, opt.img_size[0], opt.img_size[1]), device="cpu")
    dummy_input = torch.randn(1, 3, opt.img_size[0], opt.img_size[1])
    summarywriter.add_graph(modelsummary, (dummy_input))

    onnx_path = os.path.join(log_dir, "netron_model.onnx")
    torch.onnx.export(modelsummary, dummy_input, onnx_path)
    del modelsummary  # release the CPU copy

    # Per-class loss weights derived from the label frequencies
    class_weights = labels_to_class_weights(train_datasets.labels, nc).to(device)  # attach class weights
    # A manual weight tensor (one entry per class) could be substituted here instead
    criterion = nn.CrossEntropyLoss(class_weights.float()).to(device)

    # Save a plot of the label distribution
    plot_labels(train_datasets.labels, save_dir=log_dir)

    # Start training
    t0 = time.time()
    for epoch in range(opt.epochs):
        # ---- train phase ----
        model.train()

        # Update image weights (optional): re-sample training indices so that
        # classes with higher loss weight are drawn more often
        if opt.image_weights:
            cw = class_weights.cpu().numpy() * (1 - np.zeros(nc)) ** 2  # class weights
            iw = labels_to_image_weights(train_datasets.labels, nc=nc, class_weights=cw)  # image weights
            train_datasets.indices = random.choices(range(train_datasets.n), weights=iw,
                                                    k=train_datasets.n)  # rand weighted idx

        logger.info(('%10s' * 6) % ('Epoch', 'loss', 'accuracy', "precision", "recall", 'f1-scores'))
        train_loss = 0.0
        train_preds = []
        train_gt_labels = []
        # Raw PR-curve accumulators. They are currently never filled in; the
        # add_pr_curve_raw call below is guarded so it only fires once they are.
        train_true_positive_counts = []
        train_false_positive_counts = []
        train_true_negative_counts = []
        train_false_negative_counts = []
        train_precisions = []
        train_recalls = []
        pbar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
        for i, (train_inputs, labels, paths) in pbar:
            train_inputs = train_inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()  # reset the gradients accumulated on the weights
            mixup_opt = random.random() < hyp["mixup"]
            if mixup_opt:
                train_inputs, targets_a, targets_b, lam = mixup_data(train_inputs, labels, hyp["mixup_alpha"], device)
                outputs = model(train_inputs)
                loss_func = mixup_criterion(targets_a, targets_b, lam)
                loss = loss_func(criterion, outputs)
            else:
                outputs = model(train_inputs)
                loss = criterion(outputs, labels)

            _, preds = torch.max(outputs, 1)

            loss.backward()
            optimizer.step()

            # .item() detaches the batch loss; accumulating the tensor itself
            # (the old code) kept every batch's autograd graph alive all epoch
            train_loss += loss.item()
            if mixup_opt:
                # TODO(review): counting the same predictions against BOTH
                # mixup target sets double-weights mixup batches in the running
                # metrics — confirm before trusting train precision/recall
                preds = cuda2cpu(preds)
                train_gt_labels.extend(cuda2cpu(targets_a.data))
                train_preds.extend(preds)
                train_gt_labels.extend(cuda2cpu(targets_b.data))
                train_preds.extend(preds)

                accuracy, precision, recall, f1score, train_cm = estimate(train_gt_labels, train_preds)
            else:
                train_gt_labels.extend(cuda2cpu(labels))
                train_preds.extend(cuda2cpu(preds))
                accuracy, precision, recall, f1score, train_cm = estimate(train_gt_labels, train_preds)

            s = ('%10s' * 1 + '%10.4g' * 5) % (
                '%g/%g' % (epoch, opt.epochs - 1), train_loss / len(train_datasets), accuracy, precision, recall,
                f1score)
            pbar.set_description(s)
            # end batch ------------------------------------------------------------------------------------------------
        scheduler.step()  # advance the per-epoch cosine LR schedule
        # train end
        # ---- validation phase ----
        with torch.no_grad():  # disable autograd bookkeeping to save memory during eval
            val_loss = 0.0
            val_preds = []
            val_gt_labels = []
            model.eval()  # Set model to evaluate mode
            for i, (inputs, labels, paths) in enumerate(val_dataloader):
                inputs = inputs.to(device)

                val_gt_labels.extend(labels)
                labels = labels.to(device)

                outputs = model(inputs)
                # .item(): see the train-loss comment above
                val_loss += criterion(outputs, labels).item()

                _, preds = torch.max(outputs, 1)
                val_preds.extend(cuda2cpu(preds).tolist())

                # end epoch ----------------------------------------------------------------------------------------------------
            epoch_val_loss = val_loss / len(val_datasets)
            val_accuracy, val_precision, val_recall, val_f1score, _ = estimate(val_gt_labels, val_preds)
            print(
                "val_loss:{:.4f},val_accuracy:{:.4f},val_precision:{:.4f},val_recall:{:.4f},val_f1score:{:.4f}".format(
                    epoch_val_loss, val_accuracy, val_precision, val_recall, val_f1score))
            summarywriter.add_scalar("val/loss", epoch_val_loss, global_step=epoch)
            summarywriter.add_scalar("val/accuracy", val_accuracy, global_step=epoch)
            summarywriter.add_scalar("val/precision", val_precision, global_step=epoch)
            summarywriter.add_scalar("val/recall", val_recall, global_step=epoch)
            summarywriter.add_scalar("val/f1score", val_f1score, global_step=epoch)
            # Append this epoch's train + val metrics to results.txt
            with open(results_file, 'a') as f:
                f.write(s + ('%10.4g' * 5) % (epoch_val_loss, val_accuracy, val_precision, val_recall,
                                              val_f1score) + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)

            fitness = epoch_val_loss  # lower is better
            if best_fitness == 0 or best_fitness > fitness:
                best_fitness = fitness
        # val end

        # ---- tensorboard histograms / images ----
        if epoch % 10 == 0:
            train_inputs_make_grid = vutils.make_grid(train_inputs.to("cpu"), normalize=True, scale_each=True)
            summarywriter.add_image('Train Image', train_inputs_make_grid, epoch)  # last training batch of the epoch
            summarywriter.add_text('Text', 'text logged at step:' + str(epoch), epoch)
            for name, param in model.named_parameters():
                summarywriter.add_histogram(name, cuda2cpu(param.clone().data), epoch)
            # Raw PR curve — only when the accumulators were actually filled.
            # The old unguarded call handed always-empty lists to tensorboardX.
            if train_precisions:
                summarywriter.add_pr_curve_raw('prcurve with train raw data', train_true_positive_counts,
                                               train_false_positive_counts,
                                               train_true_negative_counts,
                                               train_false_negative_counts,
                                               train_precisions,
                                               train_recalls, global_step=epoch)
        # tensorboard histogram,image... end

        # Save model: always overwrite last.pt; refresh best.pt at the lowest val loss
        torch.save({'model': model.state_dict()}, last)
        if best_fitness == fitness:
            torch.save({'model': model.state_dict()}, best)
        # end epoch -------
    # end training

    # Plot the metric curves gathered in results.txt
    plot_results(save_dir=log_dir)  # save as results.png
    logger.info('%g epochs completed in %.3f hours.\n' % (opt.epochs, (time.time() - t0) / 3600))

    torch.cuda.empty_cache()  # release cached CUDA memory after training
    summarywriter.close()  # close the tensorboardX writer


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Which backbone from models.transfer to train
    parser.add_argument('--model-name', type=str, default='shufflenet_v2_x0_5',
                        help='model architecture name')  # fixed: old help text wrongly said 'initial weights path'
    parser.add_argument('--data', type=str, default='data/tyre.yaml', help='data.yaml path')
    parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--batch-size', type=int, default=2, help='total batch size for all GPUs')
    parser.add_argument('--img-size', nargs='+', type=int, default=[512, 128],
                        help='[train, test] image sizes')  # [h, w]; multiples of 16 are friendlier to the GPU
    # NOTE(review): store_true combined with default=True means this flag can
    # never be disabled from the command line — confirm that is intended
    parser.add_argument('--image-weights', action='store_true', default=True,
                        help='use weighted image selection for training')  # when enabled, image weights are refreshed each epoch
    parser.add_argument('--name', default='tyre', help='renames results.txt to results_name.txt if supplied')
    parser.add_argument('--device', default='1', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
    parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
    opt = parser.parse_args()

    log_dir = increment_dir(Path(opt.logdir) / 'exp', "{}_{}".format(opt.name, opt.model_name))
    device = select_device(opt.device)

    set_logging()

    with open(opt.hyp, encoding='utf-8') as f:
        hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps

    train(hyp, opt, device, log_dir)
