# Build and expose an ArgumentParser instance for the experiment:
import argparse
import os

from utils import get_time


def getparser(argv=None):
    """Build the experiment's command-line parser and parse arguments.

    Args:
        argv: Optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse falls back to ``sys.argv[1:]``
            (the original behavior).

    Returns:
        argparse.Namespace: the parsed options.
    """

    def _str2bool(value):
        # BUG FIX: argparse's ``type=bool`` treats any non-empty string as
        # True (so ``--imgnorm False`` used to yield True). This converter
        # parses the usual textual spellings and rejects anything else.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', '1', 'yes', 'y'):
            return True
        if lowered in ('false', '0', 'no', 'n'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

    parser = argparse.ArgumentParser(
        prog='example',  # program name
        description='cross modal deep learning experiment.',  # description
        epilog='Copyright(r), 2024, gfdr5 ,2024-11-04'  # trailer shown after help
    )
    # Default paths for the local (Windows) development environment.
    weights = './model_param/original_updown_backbone.pth'
    data_path = 'D:/AAExp/Dataset/f30k'
    vocab_path = 'D:/AAExp/Dataset/vocab'
    weights_vit = './model_param/jx_vit_base_patch16_224_in21k-e5005f0a.pth'

    # Alternative defaults for the lab servers / autodl / cluster machines —
    # uncomment the relevant group when running there.
    # weights = '/home/jsjlab/lt/Example/model/param/original_updown_backbone.pth'
    # data_path = '/home/master/crossmodal/Dataset/f30k'
    # vocab_path = '/home/master/crossmodal/Dataset/vocab/'
    # weights_vit = "/home/jsjlab/lt/Example/model/param/jx_vit_base_patch16_224_in21k-e5005f0a.pth"
    # data_path = '/root/autodl-tmp/f30k'
    # vocab_path = '/root/autodl-tmp/vocab'
    # data_path = '/clusters/data_4090/user_data/liteng/projectList/SCAN/dataset/data/data/Flickr 30k/'
    # vocab_path = '/clusters/data_4090/user_data/liteng/projectList/SCAN/dataset/vocab/vocab'

    data_name = 'f30k'
    # Dataset options
    parser.add_argument('--data_path', default=data_path, type=str)
    parser.add_argument("--data_name", default=data_name, help="{coco,f30k,cc152k}")
    parser.add_argument("--vocab_path", default=vocab_path)
    # Pre-trained weight options
    parser.add_argument('--backbone_path', type=str, default=weights, help='path to the pre-trained backbone net')
    parser.add_argument('--weights', type=str, default=weights_vit, help='path to the vit net')
    # Training options
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--epoches', default=25, type=int)  # NOTE: flag name kept as-is ("epoches") for caller compatibility
    parser.add_argument('--resume', default='', type=str)  # checkpoint path to resume training from
    parser.add_argument('--reset_start_epoch', default=None, type=int)
    parser.add_argument('--workers', default=1, type=int)
    parser.add_argument('-lr', default=5e-4, type=float)
    parser.add_argument("--lr_update", default=15, type=int)
    parser.add_argument("--grad_clip", default=2.0, type=float, help="Gradient clipping threshold.")
    # Loss-function options
    parser.add_argument("--margin", default=0.2, type=float)
    parser.add_argument("--max_violation", default=True, type=_str2bool)
    parser.add_argument("--agg_func", default='GPO', type=str)
    parser.add_argument("--cross_attn", default='t2i', type=str)
    parser.add_argument('--lambda_softmax', default=9., type=float, help='Attention softmax temperature.')
    # Feature dimensions
    parser.add_argument("--img_dim", default=2048, type=int)
    parser.add_argument("--word_dim", default=300, type=int)
    parser.add_argument('--embed_size', default=1024, type=int)
    # Model options
    parser.add_argument("--imgnorm", default=True, type=_str2bool, help="normalize the image embeddings.", )
    parser.add_argument("--txtnorm", default=True, type=_str2bool, help="normalize the text embeddings.", )
    parser.add_argument("--bi_gru", default=True, type=_str2bool, help="Use bidirectional GRU.")
    parser.add_argument("--num_layers", default=1, type=int, help="Number of GRU layers.")
    # Logging options
    parser.add_argument("--runs_dir", default='./results/runs/' + get_time())  # tensorboard output
    parser.add_argument("--result_model", default='./results/models')  # model checkpoint directory
    parser.add_argument("--logs_dir", default='./results/logs/' + get_time())  # log output
    parser.add_argument("--suffix", default='')  # filename suffix
    parser.add_argument("--log_step", default=10, type=int)

    args = parser.parse_args(argv)
    return args


def save_parameters(opt, save_path):
    """Write every attribute of *opt* to ``<save_path>/params.txt``.

    Each attribute is emitted as its name on one line followed by its
    value indented on the next; dict-valued attributes list each
    ``key: value`` pair on its own indented line.

    Args:
        opt: An object (e.g. ``argparse.Namespace``) whose ``vars()``
            mapping is dumped.
        save_path: Existing directory in which ``params.txt`` is created.
    """
    pieces = []
    for name, value in vars(opt).items():
        pieces.append(str(name))
        if isinstance(value, dict):
            # One indented "key: value" line per dict entry.
            for inner_key, inner_value in value.items():
                pieces.append('\n\t' + str(inner_key) + ': ' + str(inner_value))
        else:
            pieces.append('\n\t' + str(value))
        pieces.append('\n')

    with open(os.path.join(save_path, 'params.txt'), 'w', encoding='utf8') as f:
        f.write(''.join(pieces))