import _init_paths
import os
import argparse
import torch
import subprocess
import datetime
from fakenews.function.config import config, update_config
from fakenews.function.train import train_net
from fakenews.function.test import test_net
from common.utils.create_logger import create_logger


def parse_args():
    """Parse CLI options, fold the overrides into the global config, and
    (for slurm jobs) populate the torch.distributed environment variables.

    Returns:
        (args, config): the parsed argparse namespace and the mutated
        global config object.
    """
    parser = argparse.ArgumentParser('Train Cognition Network')
    parser.add_argument('--cfg', type=str, default='./cfgs/phemedata_cv/vqa_4x16G_fp32.yaml', help='path to config file')
    parser.add_argument('--model-dir', type=str, default='./', help='root path to store checkpoint')
    parser.add_argument('--log-dir', type=str, help='tensorboard log dir')
    parser.add_argument('--dist', help='whether to use distributed training', default=False, action='store_true')
    parser.add_argument('--slurm', help='whether this is a slurm job', default=False, action='store_true')
    parser.add_argument('--do-test', help='whether to generate csv result on test set', default=False, action='store_true')
    parser.add_argument('--do-adv-test', help='whether to do test with adv test set', default=False, action='store_true')
    parser.add_argument('--cudnn-off', help='disable cudnn', default=False, action='store_true')

    # Options for quickly evaluating a pretrained model / sweeping settings.
    parser.add_argument('--partial-pretrain', type=str)
    parser.add_argument('--adv', default='CLS', type=str, help='for grid search, overwrite config')
    parser.add_argument('--gpus', type=str, help='overwrite config file')
    parser.add_argument('--cv', default=0, type=int, help='cross validation type 0 2 4 -1 -2')
    parser.add_argument('--fusion', type=str, help='')

    args = parser.parse_args()

    # Load the YAML config first, then layer the CLI overrides on top of it.
    if args.cfg is not None:
        update_config(args.cfg)
    if args.fusion is not None:
        config.NETWORK.FUSION_MODE = args.fusion
    if args.gpus is not None:
        config.GPUS = args.gpus
    if args.cv is not None:
        config.DATASET.CROSS_VALIDATION = args.cv
    if args.adv is not None:
        config.NETWORK.VLBERT.perturbation = args.adv
    if args.model_dir is not None:
        # Checkpoints go under <model-dir>/<configured output path>.
        config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
    if args.partial_pretrain is not None:
        config.NETWORK.PARTIAL_PRETRAIN = args.partial_pretrain

    if args.slurm:
        # Derive the torch.distributed rendezvous settings from the slurm
        # environment: rank comes from SLURM_PROCID, the master address is the
        # first host of the allocation.
        proc_id = int(os.environ['SLURM_PROCID'])
        ntasks = int(os.environ['SLURM_NTASKS'])
        node_list = os.environ['SLURM_NODELIST']
        num_gpus = torch.cuda.device_count()
        addr = subprocess.getoutput(
            'scontrol show hostname {} | head -n1'.format(node_list))
        os.environ['MASTER_PORT'] = str(29500)
        os.environ['MASTER_ADDR'] = addr
        os.environ['WORLD_SIZE'] = str(ntasks)
        os.environ['RANK'] = str(proc_id)
        os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)

    return args, config

def ablation_test(args, config):
    """Ablation study: no co-attention fusion vs. no fusion, on both datasets.

    For each dataset config (Pheme trained with ten-fold CV, Mediaeval with a
    single run), trains every (perturbation, fusion-mode) combination.
    """
    advs = ['CLS', 'PIC']
    fusionmodes = ['RAW_MAX_ALIGN_MINUS', 'TRAN_MAX_NOFU']
    cfgs = ['./cfgs/phemedata_cv/vqa_4x16G_fp32.yaml',
            './cfgs/mediaeval_cv/vqa_4x16G_fp32.yaml']
    for i, c in enumerate(cfgs):
        update_config(c)
        for a in advs:
            for f in fusionmodes:
                # BUG FIX: FUSION_MODE was previously assigned from `f` in the
                # outer loop, before the `for f in fusionmodes` loop defined it
                # (NameError on first use). Both knobs now get set inside the
                # innermost loop, once per combination.
                config.NETWORK.FUSION_MODE = f
                config.NETWORK.VLBERT.perturbation = a
                args.adv = a
                if i == 0:
                    ten_fold_trains(args, config)
                else:
                    single_train(args, config)

def single_test(args, config, ckpt_fn='vqa3a-0.8646-0014-05_31_12.53.18-best.model'):
    """Evaluate one saved checkpoint (looked up under ./model) on the test set.

    Known checkpoints, for reference:
      vqa3pCLS-0.8750-0017-08_09_17.28.35-best.model  original model
      vqa3pTEXT-0.8729-0014-08_09_17.25.04-best.model
      vqa3pPIC-0.8646-0007-08_09_17.18.49-best.model
      vqa3p_NO-0.8646-08_09_15.46.06.model
      vqa3a-0.8646-0014-05_31_12.53.18-best.model  model after text attack (attacked data only)
      vqa3a-0.9896-0019-05_31_14.09.06-best.model  model after text attack (original + attacked data)
    """
    # create_logger is still invoked for its side effects (log/output dirs);
    # the returned training output path is unused since checkpoints are read
    # from ./model instead.
    _, train_output_path = create_logger(config.OUTPUT_PATH, args.cfg,
                                         config.DATASET.TRAIN_IMAGE_SET,
                                         split='train')
    ckpt_path = os.path.join('./model', ckpt_fn)
    test_net(args, config, ckpt_path)

def single_train(args, config):
    """Train once on the configured split; optionally evaluate afterwards."""
    if args.do_adv_test:
        # Validate against the adversarial variant of the validation split.
        config.DATASET.VAL_IMAGE_SET += "_adv"
    rank, model = train_net(args, config)
    if args.do_test:  # skipped unless --do-test was passed
        test_net(args, config)

def grid_search4(args, config):
    """Exhaustive sweep over all 2**4 = 16 fusion-mode combinations,
    running a ten-fold cross-validation training for each mode."""
    stage1 = ['TRAN', 'RAW']
    stage2 = ['MEAN', 'MAX']
    stage3 = ['RAW', 'ALIGN']
    stage4 = ['MINUS', 'HADAMARD']
    for s1 in stage1:
        for s2 in stage2:
            for s3 in stage3:
                for s4 in stage4:
                    fusion_mode = '_'.join((s1, s2, s3, s4))
                    config.NETWORK.FUSION_MODE = fusion_mode
                    config.MODEL_PREFIX = fusion_mode
                    ten_fold_trains(args, config)

def grid_search3(args, config):
    """Sweep SKIPVLB fusion variants with fixed MAX/ALIGN/MINUS stages,
    training (and optionally testing) each resulting fusion mode."""
    prefixes = ['SKIPVLB']
    firsts = ['TRAN', 'RAW']    # full sweep was ['TRAN','RAW']
    pools = ['MAX']             # full sweep was ['MEAN', 'MAX']
    aligns = ['ALIGN']          # full sweep was ['RAW', 'ALIGN']
    combines = ['MINUS']        # full sweep also had 'HADAMARD'

    for first in firsts:
        for pool in pools:
            for align in aligns:
                for combine in combines:
                    for prefix in prefixes:
                        fusion_mode = '_'.join((prefix, first, pool, align, combine))
                        config.NETWORK.FUSION_MODE = fusion_mode
                        config.MODEL_PREFIX = fusion_mode + config.NETWORK.VLBERT.perturbation
                        print("model {}_adv_{}, train:{},val:{}, {}\n".format(
                            config.NETWORK.VLBERT.perturbation, fusion_mode,
                            config.DATASET.TRAIN_IMAGE_SET, config.DATASET.TEST_IMAGE_SET,
                            datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
                        rank, model = train_net(args, config)
                        if args.do_test:  # skipped unless --do-test was passed
                            test_net(args, config)

def grid_search2(args, config):
    """Learning-rate sweep: one training run per LR on a fixed multiplier
    ladder above the base rate 6.25e-7."""
    base = 6.25e-7
    for mult in (1, 5, 10, 20, 50, 100):
        lr = mult * base
        config.TRAIN.LR = lr
        print("model {}_adv_lr{}, train:{},val:{}, {}\n".format(
            config.NETWORK.VLBERT.perturbation, lr,
            config.DATASET.TRAIN_IMAGE_SET, config.DATASET.TEST_IMAGE_SET,
            datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
        rank, model = train_net(args, config)
        if args.do_test:  # skipped unless --do-test was passed
            test_net(args, config)

def grid_search(args, config):
    """Joint sweep over TRAIN.DELTA and the adversarial coefficient (zeta),
    training (and optionally testing) every (delta, zeta) pair."""
    deltas = [0.1, 0.01, 0.001]
    zetas = [0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001]
    for delta in deltas:
        for zeta in zetas:
            config.NETWORK.ADV_COEFFICIENT = zeta
            config.TRAIN.DELTA = delta
            model_name = config.MODEL_PREFIX + '_d' + str(delta) + '_z' + str(zeta)
            print("model {}, train:{},val:{}, {}\n".format(
                model_name, config.DATASET.TRAIN_IMAGE_SET, config.DATASET.TEST_IMAGE_SET,
                datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
            rank, model = train_net(args, config)
            if args.do_test:  # skipped unless --do-test was passed
                test_net(args, config)
    
def ten_fold_trains(args, config):
    """Ten-fold cross-validation driver.

    Builds [train_split, test_split] name pairs for whichever dataset the
    config points at and hands them to repeat_trains, once per perturbation
    type (or only args.adv when it is set).
    """
    perturbations = ['NO', 'TEXT', 'PIC', 'CLS']
    if args.adv is not None:
        perturbations = [args.adv]
    # 'mtr<k>'/'ptr<k>' are the fold-k train splits, 'mte<k>'/'pte<k>' the
    # matching test splits.
    mediaeval_folds = [['mtr' + str(k), 'mte' + str(k)] for k in range(10)]
    pheme_folds = [['ptr' + str(k), 'pte' + str(k)] for k in range(10)]
    if config.DATASET.DATASET_PATH == './data/PhemeData':
        repeat_trains(pheme_folds, perturbations, args, config)
    if config.DATASET.DATASET_PATH == './data/Mediaeval':
        repeat_trains(mediaeval_folds, perturbations, args, config)


def two_fold_trains(args, config):
    """5x2-fold cross-validation driver.

    Each of the 5 splits produces two runs with the halves swapped
    (train on slice 0 / eval on slice 1, then the reverse).
    """
    perturbations = ['TEXT', 'PIC', 'CLS', 'NO']
    if args.adv is not None:
        perturbations = [args.adv]
    mediaeval_folds = []
    pheme_folds = []
    for split in range(5):
        m_slice0 = 'm' + str(split) + '_0'  # split `split`, slice 0
        m_slice1 = 'm' + str(split) + '_1'  # split `split`, slice 1
        mediaeval_folds.append([m_slice0, m_slice1])
        mediaeval_folds.append([m_slice1, m_slice0])
        p_slice0 = 'p' + str(split) + '_0'  # split `split`, slice 0
        p_slice1 = 'p' + str(split) + '_1'  # split `split`, slice 1
        pheme_folds.append([p_slice0, p_slice1])
        pheme_folds.append([p_slice1, p_slice0])
    if config.DATASET.DATASET_PATH == './data/PhemeData':
        repeat_trains(pheme_folds, perturbations, args, config)
    if config.DATASET.DATASET_PATH == './data/Mediaeval':
        repeat_trains(mediaeval_folds, perturbations, args, config)

def repeat_trains(ds_list, perturbation_list, args, config):
    """Train (and optionally test) once per (perturbation, fold) pair.

    Args:
        ds_list: list of [train_split, eval_split] name pairs; the eval split
            serves as both the VAL and TEST image set.
        perturbation_list: VLBERT perturbation modes to iterate over.
    """
    for perturbation in perturbation_list:
        config.NETWORK.VLBERT.perturbation = perturbation
        model_name = config.MODEL_PREFIX + '_' + perturbation + '_adv'
        print("folds to train: \n {} ".format(ds_list))
        for train_split, eval_split in ds_list:
            config.DATASET.TRAIN_IMAGE_SET = train_split
            config.DATASET.VAL_IMAGE_SET = eval_split
            config.DATASET.TEST_IMAGE_SET = eval_split
            print("model {}, train:{},val:{}, start time:{} \n".format(
                model_name, config.DATASET.TRAIN_IMAGE_SET, config.DATASET.VAL_IMAGE_SET,
                datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
            rank, model = train_net(args, config)
            if args.do_test:  # skipped unless --do-test was passed
                test_net(args, config)
def main():
    """Dispatch to the experiment routine selected by
    config.DATASET.CROSS_VALIDATION (set via --cv or the YAML config)."""
    args, config = parse_args()
    dispatch = {
        2: two_fold_trains,     # 5x2-fold cross validation
        10: ten_fold_trains,    # 10-fold cross validation
        0: single_train,        # one plain training run
        -1: grid_search,        # delta/zeta sweep
        -2: grid_search2,       # learning-rate sweep
        -3: grid_search3,       # SKIPVLB fusion sweep
        -4: grid_search4,       # full fusion-mode sweep
        -5: single_test,        # evaluate a saved checkpoint
        -10: ablation_test,     # fusion ablation study
    }
    handler = dispatch.get(config.DATASET.CROSS_VALIDATION)
    if handler is None:
        # BUG FIX: the message used to read "is not illegal" — a double
        # negative printed exactly when the value IS unsupported.
        print("config.DATASET.CROSS_VALIDATION value is illegal")
    else:
        handler(args, config)

def test():
    """Smoke test: one training run followed by a single-checkpoint eval."""
    cli_args, cfg = parse_args()
    single_train(cli_args, cfg)
    single_test(cli_args, cfg)

if __name__ == '__main__':
    main()
# BUG FIX: the previous `else` branch printed 'nothing123 test' and called
# test() — i.e. merely IMPORTING this module kicked off a full training and
# evaluation run. That debug leftover is removed; import the module and call
# test() explicitly if the smoke test is wanted.

