#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@ File: settings.py
@ Author: YuJX
@ Function : 选择ML算法各个环节的具体组件
                --- Dataset: 根据数据集名称选择
                --- Model: 根据名称选择detector, backbone, neck, head等
                --- Optimizer: 根据名称选择优化器

"""
import os
import time

import random
import numpy as np
import torch
import torchvision 

import torch.optim as optim

from utils.dataset import get_classes

from models.detectors.trainer import FasterRCNNTrainer, YOLOv3Trainer
from models.detectors.FasterRCNN import FasterRCNN, FasterRCNNLoss, FasterRCNNBboxDecoder

from models.detectors.YOLOv3 import YOLOv3, YOLOv3Loss



def set_dataset(args):
    """
    Select a VOC-format dataset by name and load its annotation lists.

    Args:
        args: must expose ``dataset``, one of {'NWPUv2', 'DIOR'}.

    Returns:
        (input_shape, class_names, num_classes, num_train, num_val,
         num_test, train_lines, val_lines, test_lines)

    Raises:
        ValueError: if ``args.dataset`` is not recognized.
    """
    # TODO NWPUv1: raw data needs preprocessing first:
    #   1. overlap cut  2. nwpu2voc  3. split train/val/test
    #   4. visual check of validation and evaluation
    # NOTE: for both datasets trainval.txt is used for training, and
    # test.txt doubles as the validation split (matches the original
    # commented-out train/val alternatives).
    configs = {
        'NWPUv2': ([400, 400],
                   'model_data/nwpuv2/nwpuv2_classes.txt',
                   'model_data/nwpuv2/trainval.txt',
                   'model_data/nwpuv2/test.txt',
                   'model_data/nwpuv2/test.txt'),
        'DIOR':   ([800, 800],
                   'model_data/dior/dior_classes.txt',
                   'model_data/dior/trainval.txt',
                   'model_data/dior/test.txt',
                   'model_data/dior/test.txt'),
    }
    try:
        (input_shape, classes_path, train_annotation_path,
         val_annotation_path, test_annotation_path) = configs[args.dataset]
    except KeyError:
        print("\n\033[1;31;40m Error: Cannot recognize the dataset \033[0m")
        # Replaces the former `assert(False)`: asserts are stripped under
        # `python -O`, so an explicit exception is the safe validation.
        raise ValueError("Unrecognized dataset: {}".format(args.dataset))

    # Class names and their count
    class_names, num_classes = get_classes(classes_path)

    def _read_lines(path):
        # One annotation line per image (VOC-style txt list)
        with open(path, encoding='utf-8') as f:
            return f.readlines()

    train_lines = _read_lines(train_annotation_path)
    val_lines   = _read_lines(val_annotation_path)
    test_lines  = _read_lines(test_annotation_path)

    print('\n\033[1;33;40mDataset: {} \033[0m'.format(args.dataset))

    return (input_shape, class_names, num_classes,
            len(train_lines), len(val_lines), len(test_lines),
            train_lines, val_lines, test_lines)

def set_detector(args):
    """
    Build the detector and its companion objects from ``args.detector``.

    The detector's parameters are randomly initialized at this point.

    Args:
        args: must expose ``detector``, one of {'FasterRCNN', 'YOLOv3'}.

    Returns:
        (detector, loss, trainer, decoder) — ``decoder`` is ``None`` for
        YOLOv3, which has no bbox decoder implemented yet.

    Raises:
        ValueError: if ``args.detector`` is not recognized.
    """
    if args.detector == 'FasterRCNN':
        detector = FasterRCNN(args)
        detector.freeze_bn()
        loss     = FasterRCNNLoss(args)
        trainer  = FasterRCNNTrainer(args)
        decoder  = FasterRCNNBboxDecoder(args)
        print("\n\033[1;33;40mDetector : Faster RCNN \033[0m")
    # TODO finish integrating the YOLOv3 detection method
    elif args.detector == 'YOLOv3':
        detector = YOLOv3(args)
        loss     = YOLOv3Loss(args)
        trainer  = YOLOv3Trainer(args)
        # BUGFIX: `decoder` was never assigned on this branch, so the
        # return statement below raised NameError for YOLOv3.
        decoder  = None
    else:
        raise ValueError("Unrecognized Detector")

    return detector, loss, trainer, decoder

def set_optimizer(args):
    """
    Build the optimizer named by ``args.optimizer`` over ``args.model``.

    Args:
        args: must expose ``model`` (an ``nn.Module``), ``optimizer``
            ('adam' or 'sgd'), ``lr``, ``momentum`` and ``weight_decay``.
            For Adam, ``momentum`` is reused as beta1.

    Returns:
        The constructed ``torch.optim.Optimizer``.

    Raises:
        KeyError: if ``args.optimizer`` is not 'adam' or 'sgd'.
    """
    # Lazy factories: the original dict literal eagerly constructed BOTH
    # optimizers over the same parameters and threw one away. Keying is
    # unchanged, so an unknown name still raises KeyError as before.
    factories = {
        'adam': lambda: optim.Adam(args.model.parameters(), args.lr,
                                   betas=(args.momentum, 0.999),
                                   weight_decay=args.weight_decay),
        'sgd':  lambda: optim.SGD(args.model.parameters(), args.lr,
                                  momentum=args.momentum, nesterov=True,
                                  weight_decay=args.weight_decay),
    }
    # TODO: support per-layer parameter groups (e.g. no weight decay on
    # biases/BatchNorm weights) via optimizer.add_param_group(...).
    return factories[args.optimizer]()

def seed_everything(args):
    """
    Seed every RNG in play (python, numpy, torch, CUDA) for reproducibility.

    Args:
        args: must expose ``seed`` (int).
    """
    seed_value = args.seed
    # Seed the three always-available generators in one pass.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed_value)
    if torch.cuda.is_available():
        # Cover the current device and all devices, then force cuDNN into
        # deterministic (non-benchmarking) mode.
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    print('\n\033[1;33;40mSet seed to {}\033[0m'.format(seed_value))

def set_gpu_env(args):
    """
    Configure and report the GPU environment.

    Args:
        args: must expose ``gpu_id`` — a sequence of GPU device ids.
    """
    gpu_ids = args.gpu_id

    # With exactly one id, pin this process to that device; with several,
    # device placement is left to the caller (e.g. a data-parallel wrapper).
    if len(gpu_ids) == 1:
        torch.cuda.set_device(gpu_ids[0])

    available = torch.cuda.device_count()

    print("HW Env:")
    print('       Number of GPU devices Available: {}'.format(available))
    print('\n\033[1;33;40mUsing GPUs: No. {}\033[0m'.format(list(gpu_ids)))

def set_computing_env(args):
    """
    Report the software environment, pick the compute device, and seed RNGs.

    Args:
        args: must expose ``gpu_id`` (list of GPU ids, or None to force
            CPU) and ``seed`` (consumed by ``seed_everything``).

    Returns:
        (device, cuda): the selected ``torch.device`` and a bool flag
        indicating whether CUDA is in use.
    """
    # Library versions of the experiment's software environment
    print("SW Env:")
    print("     torch.__version__ = ", torch.__version__)
    print("     torchvision.__version__ = ", torchvision.__version__)

    # BUGFIX: identity check `is None` replaces the former `== None`
    # (equality can be overridden; PEP 8 mandates `is` for None tests).
    if args.gpu_id is None:
        # GPU deliberately not used
        cuda = False
        device = torch.device('cpu')
        print('\n\033[1;36;40mUsing CPU \033[0m')
        print("\n\033[1;31;40m *****Warning: Chosen to not use GPU, Use CPU only could be VERY SLOW***** \033[0m")
    elif torch.cuda.is_available():
        cuda = True
        device = torch.device('cuda')
        set_gpu_env(args)
    else:
        # GPU requested but CUDA is unusable; fall back to CPU
        cuda = False
        device = torch.device('cpu')
        print("\n\033[1;31;40m *****Warning: something wrong with CUDA env , Using CPU instead***** \033[0m")

    # Fix all RNG seeds for reproducibility
    seed_everything(args)
    return device, cuda

def set_exp_dir(args):
    """
    Build the experiment output directory path.

    Args:
        args: must expose ``exps_dir`` (root directory for experiments)
            and ``exp_name`` (explicit name; if falsy, a timestamped
            name is generated instead).

    Returns:
        The joined path ``exps_dir/<name>`` as a string.
    """
    # Fall back to an "exp_<timestamp>" name when none was supplied.
    dir_name = args.exp_name or "exp_" + time.strftime('%Y_%m_%d_%H_%M_%S')
    return os.path.join(args.exps_dir, dir_name)