import time
import warnings
import logging
import os, sys, math
import argparse
import collections
from collections import deque
from copy import deepcopy
import datetime
from typing import Iterable

import cv2
import zipfile
import pprint
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
from torch.nn import functional as F
from torchvision.ops import nms
# from torchsummary import summary

from dataset.dataloader import CocoDataset, SimpleDetDataset, VOCDataset
# import coco_eval
from util.metrics import NMS
from train import train_one_epoch, freeze_backbone, unfreeze, eval_model
from hub.yolov8 import yolov8, yolov8_transfromer_head, yolov8_p2, yolov8_p2_tf, yolov8_embed_transfromer_head, yolov8_p2_cmbl, yolov8_cmbl, yolov8_sk
from loss.loss import Loss
from visualize.trainning_visualize import Tranning_Visualization

# GPU index used only for the get_device_properties() printout in main().
DEFAULT_CUDA_ID = 0


# For ModelArts training, point zip_path, weight and save_dir at the corresponding OBS locations.
def main(args=None):
    """Train a YOLOv8 detector from command-line arguments.

    Parses CLI options, builds datasets/dataloaders, constructs the model,
    loss, optimizer and LR scheduler, optionally loads pretrained weights or
    resumes from a checkpoint, then runs the epoch loop, saving the best and
    periodic weights plus training curves.

    Args:
        args: optional list of argument strings; None means use sys.argv.

    Side effects: may extract --zip_path into ./, writes weights under
    --save_dir, writes a training log to --log_file, prints progress to stdout.
    """
    # ---------------- argument parsing ----------------
    parser = argparse.ArgumentParser(description='Simple training script.')
    # dataset options
    parser.add_argument('--dataset', help='Dataset type, must be one of simple or coco.', default='simple')
    parser.add_argument('--dataset_path', help='Path to COCO directory', default='../dataset/Road_Defect')
    parser.add_argument('--input_shape', help='input_shape', type=int, default=640)
    parser.add_argument('--zip_path', help='Path to COCO directory', default='')
    parser.add_argument('--num_workers', default=8, type=int)
    # training options
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=50)
    # BUG FIX: argparse `type=bool` treats every non-empty string as True, so
    # `--amp False` used to ENABLE mixed precision. Parse the string explicitly;
    # the default (True) is unchanged.
    parser.add_argument('--amp', help='trainning with amp, True or False',
                        type=lambda s: str(s).lower() not in ('false', '0', 'no', ''),
                        default=True)
    parser.add_argument('--lr', default=1e-3, type=float)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--optimizer_type', help='the optimizer type, sgd or adam', type=str, default='adam')
    parser.add_argument('--weight_decay', default=0.0005, type=float)
    parser.add_argument('--momentum', default=0.937, type=float)
    parser.add_argument('--depth', help='model depth, must be one of l, m, s, n', type=str, default='s')
    parser.add_argument('--log_file', help='trainning log', type=str, default='weights/_road/loss_log.txt')
    # If > 0 the backbone starts frozen and is unfrozen at
    # epoch == int(frozen_weights * epochs).
    parser.add_argument('--frozen_weights', type=float, default=0.0,
                        help="if frozen weight, if set, the backbone will be unfreezed in epoch * frozen_weights")

    # weight loading
    parser.add_argument('--resume', type=str, default='', help='resume from checkpoint')
    parser.add_argument('--weight', type=str, help='weigth path of model', default='')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    # output locations
    parser.add_argument('--save_dir', type=str, default='weights/_road', help='path to save weights')
    parser.add_argument('--save_epoch', type=int, default=1, help='save epoch')

    parser = parser.parse_args(args)  # NOTE: rebinds `parser` to the parsed Namespace

    # ---------------- device selection ----------------
    device = torch.device('cpu')
    if parser.device == 'cuda' and torch.cuda.is_available():
        print(torch.cuda.get_device_properties(DEFAULT_CUDA_ID))
        device = torch.device('cuda')

    # The zip archive takes priority; leave --zip_path empty to train on local data.
    if parser.zip_path:
        # Context manager guarantees the archive is closed even if extraction fails.
        with zipfile.ZipFile(parser.zip_path) as zip_file:
            dataset_path = zip_file.namelist()[0]
            zip_file.extractall('./')
    else:
        dataset_path = parser.dataset_path

    # ---------------- datasets / data loaders ----------------
    if parser.dataset == 'coco':
        # BUG FIX: dataset_path is always a str here, never None, so the old
        # `is None` check could not fire; test for the empty string instead.
        if not dataset_path:
            raise ValueError('Must provide --dataset_path when training on COCO,')
        dataset_train = CocoDataset(dataset_path, set_name='train',
                                    batch=parser.batch_size)
        dataset_val = CocoDataset(dataset_path, set_name='val',
                                  batch=parser.batch_size)
    elif parser.dataset == 'simple':  # project's own annotation-file dataset
        dataset_train = SimpleDetDataset(dataset_path, annotation_file='train.txt',
                                         batch=parser.batch_size,
                                         device=device,
                                         resize=(parser.input_shape, parser.input_shape))
        dataset_val = SimpleDetDataset(dataset_path, annotation_file='val.txt',
                                       batch=parser.batch_size,
                                       device=device,
                                       resize=(parser.input_shape, parser.input_shape))
    else:
        raise ValueError('Dataset type not understood (must be simpledet or coco), exiting.')
    print('Load dataset from {} with {} classes, {} labels'.format(dataset_path, dataset_train.num_classes(), len(dataset_train)))

    dataloader_train = DataLoader(dataset_train, num_workers=parser.num_workers,
                                  batch_size=parser.batch_size, shuffle=True,
                                  collate_fn=SimpleDetDataset.collate_fn)
    dataloader_val = None  # robustness: defined even if no val dataset exists
    if dataset_val is not None:
        # NOTE(review): shuffling the validation loader is unusual but kept for
        # behavioural compatibility with the original script.
        dataloader_val = DataLoader(dataset_val, num_workers=parser.num_workers,
                                    batch_size=parser.batch_size, shuffle=True,
                                    collate_fn=SimpleDetDataset.collate_fn)

    # ---------------- model ----------------
    print('model type is {}'.format(parser.depth))
    model = yolov8.yolov8_detect(dataset_train.num_classes(), parser.depth)
    model.to(device)

    # loss
    compute_loss = Loss(model, device)

    # ---------------- optimizer / scheduler ----------------
    weight_decay = parser.weight_decay  # L2 regularisation term, mitigates overfitting
    momentum = parser.momentum
    lr = parser.lr
    optimizer_type = parser.optimizer_type
    print('optimizer type={}, train with lr={}, weight_decay={},momentum={}'.format(optimizer_type, lr, weight_decay, momentum))
    # automatic mixed precision
    print('whether use amp: {}'.format(parser.amp))
    if parser.amp:
        from torch.cuda.amp import GradScaler as GradScaler
        scaler = GradScaler()
    else:
        scaler = None
    tv = Tranning_Visualization(parser.save_dir)
    # BUG FIX: `model.training = True` only flipped the flag on the root module;
    # model.train() propagates training mode to every sub-module (BN, dropout).
    model.train()
    # FIX: build only the requested optimizer instead of constructing both and
    # discarding one; unknown types still raise KeyError like the old dict lookup.
    if optimizer_type == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=lr, betas=(momentum, 0.999), weight_decay=weight_decay)
    elif optimizer_type == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum, nesterov=True, weight_decay=weight_decay)
    else:
        raise KeyError(optimizer_type)

    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    # ---------------- optional pretrained weights ----------------
    if parser.weight:
        weight_path = parser.weight
        print('load pretrained params from {}'.format(weight_path))
        if os.path.exists(weight_path):
            print('load weight from {}'.format(weight_path))
            model_dict = model.state_dict()
            model_dict_keys = list(model_dict.keys())
            pretrained_dict = torch.load(weight_path, map_location=device)

            # Keys are matched by POSITION, not by name, so renamed layers can
            # still receive weights as long as the tensor shapes line up.
            load_key, no_load_key, temp_dict = [], [], {}
            for i, (k, v) in enumerate(pretrained_dict.items()):
                if i >= len(model_dict_keys):
                    # robustness: pretrained file has more entries than the model
                    no_load_key.append(k)
                    continue
                if np.shape(model_dict[model_dict_keys[i]]) == np.shape(v):
                    temp_dict[model_dict_keys[i]] = v
                    load_key.append(model_dict_keys[i])
                else:
                    no_load_key.append(model_dict_keys[i])
            model_dict.update(temp_dict)
            model.load_state_dict(model_dict)
            print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
            print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
        else:
            print("weight in {} does not exists".format(weight_path))

    # ---------------- resume (takes priority over --weight) ----------------
    if parser.resume:
        print('resume trainning status from {}'.format(parser.resume))
        checkpoint = torch.load(parser.resume, map_location='cpu')
        # BUG FIX: load_state_dict returns an IncompatibleKeys namedtuple, not
        # the model; the original rebound `model` to that tuple, breaking every
        # later use of the model when resuming.
        model.load_state_dict(checkpoint['model'])
        if 'optimizer' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
        if 'epoch' in checkpoint:
            parser.start_epoch = checkpoint['epoch'] + 1
        if 'lr_scheduler' in checkpoint:
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
    best_val_loss = 0

    print('Num training images: {}'.format(len(dataset_train)))
    if parser.frozen_weights > 0:
        print('frozen backbone')
        freeze_backbone(model)
    print('Start trainning\r\n')
    print('Resault will be saved to {}'.format(parser.save_dir))
    # robustness: make sure torch.save / the log file have a target directory
    os.makedirs(parser.save_dir, exist_ok=True)
    word_max_length = 20
    table_header = ''.join(col.center(word_max_length) for col in (
        'Epoch', 'trainnig loss', 'val loss', 'trainning time', 'val time', 'used memory'))
    if parser.log_file:
        print('trainnig log will be writen to file {}'.format(parser.log_file))
        # context manager closes the file even if the write raises
        with open(parser.log_file, 'w') as f:
            f.writelines(table_header + '\r\n')
    print(table_header)
    warnings.filterwarnings("ignore")
    # BUG FIX: the loop now honours --start_epoch (set by resume); with the
    # default start_epoch=0 behaviour is unchanged.
    for epoch_num in range(parser.start_epoch, parser.epochs):
        # un-freeze the backbone once the configured fraction of epochs is done
        if parser.frozen_weights > 0 and epoch_num == int(parser.frozen_weights * parser.epochs):
            unfreeze(model)
        epoch_loss, val_loss = train_one_epoch(model, compute_loss, dataloader_train, dataloader_val, optimizer, device, epoch_num, parser.epochs, scaler, word_max_length, parser.log_file)
        tv.epochs_losses.append(epoch_loss)
        tv.epoch_val_loss.append(val_loss)
        lr_scheduler.step(epoch_loss)
        if epoch_num == parser.start_epoch:
            # first epoch of this run seeds the best-loss tracker
            best_val_loss = val_loss
        elif best_val_loss > val_loss:
            print('save a best state dict')
            best_val_loss = val_loss
            torch.save(model.state_dict(), os.path.join(parser.save_dir, 'best_state_dict.pt'))

        # periodic evaluation + checkpoint every --save_epoch epochs
        if (epoch_num + 1) % parser.save_epoch == 0:
            tv.epochs_mAP[epoch_num] = eval_model(model, dataloader_val, word_max_length, parser.log_file, device)
            save_name = 'epoch_{}_val_loss={:1.5f}.pt'.format(epoch_num, val_loss)
            torch.save(model.state_dict(), os.path.join(parser.save_dir, save_name))

    # final weights + training curves
    torch.save(model.state_dict(), os.path.join(parser.save_dir, 'model_final_state_dict.pt'))
    tv.draw_epochs_losses()
    tv.draw_epochs_mAP()
    tv.save_all()
# Script entry point: run training with the CLI defaults (sys.argv).
if __name__ == '__main__':
    main()
    # dataset = CocoDataset('China_MotorBike_coco', 'train')
