"""
Usage:
    main.py train [--model=<file>] [--dataset_dir=<folder>] [options]
    main.py test [--model=<file>] [--dataset_dir=<folder>] [options]
    main.py free_test [--model=<file>] [--dataset_dir=<folder>] [--index=<int>] [--layer=<int>] [options]
    main.py max_act_patch [--model=<file>] [--dataset_dir=<folder>] [--index=<int>] [--layer=<int>] [options]
    main.py gradCam [--model=<file>] [--dataset_dir=<folder>] [--index=<int>] [--layer=<int>] [options]
    main.py umap [--model=<file>] [--dataset_dir=<folder>] [--index=<int>] [--layer=<int>] [options]

Options:
    -h --help                               show this screen.
    --cuda                                  use GPU
    --model=<str>                           'CAE', 'CAE_V2', 'CAE_V3', 'TripletLoss_Knn', 'TripletLoss_Knn_v2', ...
    --config_name=<str>                     name of the training configuration to use
    --pretrained_ckpt_path=<file>           if specified when training, will load pretrained params into the model
    --dataset_dir=<folder>                  [default: ./dataset/nankai_raw]

    --max_epochs=<int>                      max epoch [default: 30]
    --batch_size=<int>                      batch size [default: 128]
    --lr=<float>                            learning rate [default: 0.0003]
    --lr_decay=<bool>                       learning rate decay [default: False]
    --weight_decay=<float>                  L2 regular param λ: [default: 0.1]
    --dropout=<float>                       dropout [default: 0.3]
    --patience=<int>                        wait for how many iterations to decay learning rate [default: 1]
    --max-num-trial=<int>                   terminate training after how many trials [default: 5]
    --ckpt_save_path=<file>                 model save path [default: model.ckpt]

    --seeds=<str>                           comma-separated seeds [default: 42,7,13,21,36]
    --seed=<int>                            deprecated, use --seeds instead [default: 42]

    --train-src=<file>                      train source file
    --train-tgt=<file>                      train target file
    --dev-src=<file>                        dev source file
    --dev-tgt=<file>                        dev target file
    --vocab=<file>                          vocab file
    --embed-size=<int>                      embedding size [default: 256]
    --hidden-size=<int>                     hidden size [default: 256]
    --clip-grad=<float>                     gradient clipping [default: 5.0]
    --log-every=<int>                       log every [default: 10]
    --beam-size=<int>                       beam size [default: 5]
    --sample-size=<int>                     sample size [default: 5]
    --uniform-init=<float>                  uniformly initialize all parameters [default: 0.1]
    --valid-niter=<int>                     perform validation after how many iterations [default: 2000]
    --max-decoding-time-step=<int>          maximum number of decoding time steps [default: 70]
    
    --index=<str>                           the index of the dataset sample, used for free_test [default: 0]
    --layer=<str>                           the model layer to hook, used for free_test [default: 0]
"""
from datetime import datetime
import os

import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchinfo import summary

from docopt import docopt
from tqdm import tqdm
from pathlib import Path

from models.MDW_Net import MDW_Net
from models.MobileNetV4_Font import MobileNetV4_Font

import trainer
import evaluater

from dataset import ZhCharDataset
from dataset_96 import ZhCharDataset_96
from utils import set_seed, set_clsfier


def train(args, seed, base_dir):
    """Train one model for a single random seed.

    Args:
        args: docopt argument dict (mutated: '--ckpt_save_path' is rewritten
            to the seed-shared checkpoint directory).
        seed: int random seed for this run, recorded in the trainer config.
        base_dir: directory stem shared by all seeds of this experiment.

    Raises:
        ValueError: if '--model' names an unsupported model.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    # Experiment-specific output locations, created on demand.
    os.makedirs(f"./ckpt/{base_dir}", exist_ok=True)
    args['--ckpt_save_path'] = f"./ckpt/{base_dir}/"

    os.makedirs(f"./logs/{base_dir}_train", exist_ok=True)
    os.makedirs(f"./runs/{base_dir}_train", exist_ok=True)
    log_dir = f"./logs/{base_dir}_train"
    run_dir = f"./runs/{base_dir}_train"

    ckpt_save_path = args.get('--ckpt_save_path')   # checkpoint save path
    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    dataset_dir = args.get('--dataset_dir')

    # Map model name -> class instead of duplicating two identical branches.
    # Both supported models train on the 96px dataset, so the plain
    # ZhCharDataset is no longer loaded first and thrown away.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'error: unknown model name: {model_name}')

    train_dataset = ZhCharDataset_96(dataset_dir, mod='train', transform=True)
    val_dataset = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    num_classes = len(train_dataset.label2i)

    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)

    # NOTE(review): these hyperparameters are hard-coded and override the CLI
    # options (--max_epochs/--batch_size/--lr/--lr_decay/--weight_decay); the
    # original code parsed those flags — including an unsafe eval() of
    # --lr_decay — but never used them, so the dead parsing was removed.
    tconf = trainer.TrainerConfig(max_epochs=50, batch_size=64, learning_rate=6e-4,
                                  lr_decay=True, lr_decay_type='exp',
                                  warmup_num=len(train_dataset)//5, final_num=8*len(train_dataset),
                                  weight_decay=None, betas=(0.9, 0.99), optim='Lion', ckpt_path=ckpt_save_path,
                                  train_type='feature',
                                  seed=seed, log_dir=log_dir, run_dir=run_dir)
    mytrainer = trainer.Trainer(model, train_dataset, val_dataset, tconf, pretrained_ckpt_path)
    mytrainer.train()

def test(args, base_dir):
    """Evaluate a pretrained checkpoint on the validation split.

    Args:
        args: docopt argument dict; '--pretrained_ckpt_path' is required.
        base_dir: directory stem for this run's logs.

    Raises:
        ValueError: if no checkpoint path is given or '--model' is unsupported.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    os.makedirs(f"./logs/{base_dir}_test", exist_ok=True)
    os.makedirs(f"./runs/{base_dir}_test", exist_ok=True)
    log_dir = f"./logs/{base_dir}_test"
    run_dir = f"./runs/{base_dir}_test"

    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if pretrained_ckpt_path is None:
        raise ValueError('--pretrained_ckpt_path is required for testing')

    dataset_dir = Path(args.get('--dataset_dir'))

    # Map model name -> class instead of duplicating two identical branches.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'error: unknown model name: {model_name}')

    test_dataset = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    num_classes = len(test_dataset.label2i)
    test_conf = evaluater.EvaluaterConfig(batch_size=1,
                                          log_dir=log_dir,
                                          run_dir=run_dir)
    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)
    model.load(pretrained_ckpt_path)

    mytester = evaluater.Evaluater(model, test_dataset, test_conf, state='test')
    mytester.test()

def free_test(args, base_dir):
    """Dump feature-map visualisations ('free test') for a pretrained model.

    Args:
        args: docopt argument dict.
        base_dir: directory stem; images go to ./feature_map/{base_dir}_freetest.

    Raises:
        ValueError: if '--model' names an unsupported model.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    os.makedirs(f"./feature_map/{base_dir}_freetest", exist_ok=True)
    img_dir = f"./feature_map/{base_dir}_freetest"

    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    dataset_dir = args.get('--dataset_dir')
    # NOTE(review): --index/--layer were parsed here but never forwarded to
    # the Evaluater, so the dead parsing was removed — confirm the Evaluater
    # does not need them before deleting the CLI options themselves.

    # Map model name -> class instead of duplicating two identical branches.
    # Both supported models use the 96px dataset, so the preliminary dataset
    # selection (which loaded a dataset that was immediately replaced) is gone.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'Unsupported model: {model_name}')

    dataset = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    num_classes = len(dataset.label2i)
    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)

    model.load(pretrained_ckpt_path)
    test_conf = evaluater.EvaluaterConfig(batch_size=64, img_dir=img_dir)
    mytester = evaluater.Evaluater(model, dataset, test_conf, state='free_test')
    mytester.free_test()

def max_act_patch(args, base_dir):
    """Extract maximally-activating patches for a pretrained model.

    Args:
        args: docopt argument dict.
        base_dir: directory stem; images go to ./maxActPatch/{base_dir}_maxActPatch.

    Raises:
        ValueError: if '--model' names an unsupported model.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    os.makedirs(f"./maxActPatch/{base_dir}_maxActPatch", exist_ok=True)
    img_dir = f"./maxActPatch/{base_dir}_maxActPatch"

    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    dataset_dir = args.get('--dataset_dir')

    # Map model name -> class instead of duplicating two identical branches.
    # The original also had a dead if/else whose branches both built the same
    # ZhCharDataset, which was then discarded for every supported model.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'Unsupported model: {model_name}')

    dataset = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    num_classes = len(dataset.label2i)
    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)

    model.load(pretrained_ckpt_path)
    test_conf = evaluater.EvaluaterConfig(batch_size=64, img_dir=img_dir)
    mytester = evaluater.Evaluater(model, dataset, test_conf, state='max_act_patch')
    mytester.max_act_patch()

def gradCam(args, base_dir):
    """Produce Grad-CAM visualisations for a pretrained model.

    The Evaluater receives a (train_set, val_set) tuple as its dataset.

    Args:
        args: docopt argument dict.
        base_dir: directory stem; images go to ./gradCam/{base_dir}_gradCam.

    Raises:
        ValueError: if '--model' names an unsupported model.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    os.makedirs(f"./gradCam/{base_dir}_gradCam", exist_ok=True)
    img_dir = f"./gradCam/{base_dir}_gradCam"

    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    dataset_dir = args.get('--dataset_dir')

    # Map model name -> class instead of duplicating two identical branches.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'Unsupported model: {model_name}')

    train_set = ZhCharDataset_96(dataset_dir, mod='train', transform=True)
    val_set = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    dataset = (train_set, val_set)
    num_classes = len(train_set.label2i)

    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)
    model.load(pretrained_ckpt_path)

    test_conf = evaluater.EvaluaterConfig(batch_size=64, img_dir=img_dir)
    mytester = evaluater.Evaluater(model, dataset, test_conf, state='gradCam')
    mytester.gradCam()

def umap_features(args, base_dir):
    """Project a pretrained model's features to 2D (UMAP) and save plots.

    The Evaluater receives a (train_set, val_set) tuple as its dataset and
    runs in state='2d'.

    Args:
        args: docopt argument dict.
        base_dir: directory stem; images go to ./umap/{base_dir}_umap.

    Raises:
        ValueError: if '--model' names an unsupported model.
    """
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    os.makedirs(f"./umap/{base_dir}_umap", exist_ok=True)
    img_dir = f"./umap/{base_dir}_umap"

    pretrained_ckpt_path = args.get('--pretrained_ckpt_path')
    dataset_dir = args.get('--dataset_dir')

    # Map model name -> class instead of duplicating two identical branches.
    model_classes = {'MDW_Net': MDW_Net, 'MobileNetV4_Font': MobileNetV4_Font}
    if model_name not in model_classes:
        raise ValueError(f'Unsupported model: {model_name}')

    train_set = ZhCharDataset_96(dataset_dir, mod='train', transform=True)
    val_set = ZhCharDataset_96(dataset_dir, mod='val', transform=True)
    dataset = (train_set, val_set)
    num_classes = len(train_set.label2i)

    from models.MDWNet_config import MDW_CONFIGS
    model_conf = set_clsfier(MDW_CONFIGS[config_name], num_classes)
    model = model_classes[model_name](model_conf)
    model.load(pretrained_ckpt_path)

    test_conf = evaluater.EvaluaterConfig(batch_size=64, img_dir=img_dir)
    mytester = evaluater.Evaluater(model, dataset, test_conf, state='2d')
    mytester.umap_features()

def main(args):
    """Dispatch the docopt-parsed arguments to the requested sub-command.

    Training loops over every seed in --seeds; all other commands run once.

    Raises:
        RuntimeError: if no recognised command flag is set.
    """
    seeds = [int(s) for s in args.get('--seeds').split(',')]
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_name = args.get('--model')
    config_name = args.get('--config_name')

    if args['train']:
        # Training runs share one timestamped directory across all seeds.
        base_dir = f"{timestamp}_{model_name}_{config_name}"
        for seed in seeds:
            set_seed(seed)
            train(args, seed, base_dir)
        return

    # Remaining commands use the same base_dir form; dispatch via a table.
    handlers = {
        'test': test,
        'free_test': free_test,
        'max_act_patch': max_act_patch,
        'gradCam': gradCam,
        'umap': umap_features,
    }
    base_dir = f"{timestamp}_{model_name}"
    for flag, handler in handlers.items():
        if args[flag]:
            handler(args, base_dir)
            return
    raise RuntimeError('Invalid run mode')


if __name__ == '__main__':
    # Parse CLI arguments against the module docstring (docopt usage spec).
    args = docopt(__doc__)
    # print(args)
    main(args)
