import os.path
import platform
import shlex
from pprint import pprint

import yaml

# Candidate adapter ranks shared by every rank-swept method.
rank = [64]

# Hyperparameter search space: for each fine-tuning method, the config key
# to sweep and the list of candidate values.  The LoRA-family methods all
# sweep ``lora_rank``; GaLore has its own rank key, and LLaMA-Pro sweeps the
# number of trainable layers instead.
search_params = {
    name: {"key": "lora_rank", "value": rank}
    for name in ("DORA", "LoRA+", "PiSSA", "LongLoRA")
}
search_params["galore"] = {"key": "galore_rank", "value": rank}
search_params["LLaMA_Pro"] = {"key": "freeze_trainable_layers", "value": [2, 4, 8]}

# Method-specific training arguments layered on top of the base YAML config.
# Keys mirror LLaMA-Factory CLI option names; each entry is merged into the
# base config via ``dict.update`` before the sweep value is applied.
LORA_IMPROVE = {
    "DORA": {
        "finetuning_type": "lora",
        "lora_target": "all",
        "lora_alpha": 64,
        "lora_dropout": 0.1,
        "use_dora": True,
    },
    "LoRA+": {
        "finetuning_type": "lora",
        "lora_target": "all",
        "lora_alpha": 64,
        "lora_dropout": 0.1,
        "loraplus_lr_ratio": 32,  # LoRA plus learning rate ratio (lr_B / lr_A). (default: None)
        "loraplus_lr_embedding": 1e-06,  # LoRA plus learning rate for lora embedding layers. (default: 1e-06)
    },
    "PiSSA": {
        "finetuning_type": "lora",
        "lora_target": "all",
        "lora_alpha": 64,
        "lora_dropout": 0.1,
        "pissa_init": True,  # Whether to initialize a PiSSA adapter. (default: False)
        "pissa_iter": 16,  # The number of iteration steps performed by FSVD in PiSSA. Use -1 to disable it. (default: 16)
        "pissa_convert": True,  # Whether to convert the PiSSA adapter to a normal LoRA adapter. (default: False)
    },
    "LongLoRA": {
        "finetuning_type": "lora",
        "lora_target": "all",
        "lora_alpha": 64,
        "lora_dropout": 0.1,
        "shift_attn": True,
    },
    "galore": {
        "finetuning_type": "full",
        "use_galore": True,
        # BUG FIX: was misspelled "glore_target", which LLaMA-Factory's
        # argument parser does not recognize.
        "galore_target": "all",
        "galore_update_interval": 200,  # Number of steps to update the GaLore projection. (default: 200)
        "galore_scale": 0.25,  # GaLore scaling coefficient. (default: 0.25)
        "galore_proj_type": "std",  # {std,reverse_std,right,left,full} Type of GaLore projection. (default: std)
        "galore_layerwise": False,  # Whether or not to enable layer-wise update to further save memory. (default: False)
    },
    "LLaMA_Pro": {
        "finetuning_type": "freeze",
        "freeze_trainable_modules": "all",
        "use_llama_pro": True,
    },
}



# Tasks to benchmark: task name -> the LLaMA-Factory dataset it trains on.
# (Here the task name and dataset name coincide.)
tasks = {"任意门实体识别_实体识别": {"dataset": "任意门实体识别_实体识别"}}


# Per-platform model locations and output root.  Every entry must provide
# both "model_name_or_path" and "template", because the driver loop reads
# model_info["template"] unconditionally.
if platform.system() == "Windows":
    model_names = {
        "qwen2-1.5B-Instruct": {
            "model_name_or_path": r"D:\models\qwen1.5-1.8b",
            "template": "qwen",
        }
    }
    save_root = r'D:\models\lora_test'
else:
    model_names = {
        "qwen2-1.5B-Instruct": {
            "model_name_or_path": "/home/jovyan/lwr/Qwen/Qwen2-1.5B-Instruct",
            # BUG FIX: "template" was missing on this branch, so the driver
            # loop crashed with KeyError on non-Windows hosts.  Qwen2 models
            # use the "qwen" chat template in LLaMA-Factory.
            "template": "qwen",
        }
    }
    # NOTE(review): this is a Windows path on the non-Windows branch —
    # presumably it should point somewhere under /home/jovyan; confirm.
    save_root = r'D:\models\lora_test'

# NOTE(review): base_yaml is also a Windows path; on Linux this file will not
# exist — confirm the intended location of base_lora_sft.yaml.
base_yaml = r'D:\PycharmProjects\LLaMA-Factory\examples\train_lora\base_lora_sft.yaml'

def make_save_path(dataset, model_name, lore_type, key, key_value):
    """Return the output directory for one experiment run.

    The folder name encodes dataset, model, tuning method, and the swept
    hyperparameter/value so that distinct runs never collide.
    """
    parts = (dataset, model_name, lore_type, key, key_value)
    return os.path.join(save_root, "_".join(str(part) for part in parts))


def convert_yaml_to_dict(yaml_file):
    """Parse *yaml_file* (UTF-8) with yaml.safe_load and return the result."""
    with open(yaml_file, 'r', encoding='utf-8') as handle:
        return yaml.safe_load(handle)

def make_cmd_str(args):
    """Render one distributed-training shell command.

    Args:
        args: mapping of LLaMA-Factory CLI option name -> value; each pair
            is appended to the fixed torchrun invocation as ``--key value``.

    Returns:
        The complete command line as a single string.
    """
    cmd_str = "CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 torchrun --nproc_per_node 8 --nnodes 1 --node_rank 0 --master_addr 127.0.0.1 --master_port 29500 src/train.py"
    for key, value in args.items():
        # BUG FIX: values were interpolated unquoted, so Windows-style paths
        # (backslashes) or values with spaces were mangled by the shell when
        # lora_run.sh was executed.  shlex.quote makes each value shell-safe.
        cmd_str += f" --{key} {shlex.quote(str(value))}"
    return cmd_str

def make_cmd_file(args_list):
    """Write one training command per config dict into ``lora_run.sh``."""
    lines = [make_cmd_str(args) + "\n" for args in args_list]
    with open("lora_run.sh", 'w', encoding='utf-8') as fwrite:
        fwrite.writelines(lines)

# Build the full cross-product of tasks x models x tuning methods x swept
# values, then emit one torchrun command per combination into lora_run.sh.
args_list = []
for task_name, task_info in tasks.items():
    dataset = task_info["dataset"]
    for model_name, model_info in model_names.items():
        for lora_type, lora_params in LORA_IMPROVE.items():
            sweep = search_params[lora_type]
            search_k = sweep['key']
            for search_v in sweep['value']:
                # Start from a fresh copy of the shared base config, then
                # layer on the method-specific options and the sweep value.
                train_args = convert_yaml_to_dict(base_yaml)
                train_args.update(lora_params)
                train_args[search_k] = search_v
                train_args["model_name_or_path"] = model_info["model_name_or_path"]
                train_args["template"] = model_info["template"]
                train_args["dataset"] = dataset
                train_args["output_dir"] = make_save_path(
                    dataset, model_name, lora_type, search_k, search_v
                )
                pprint(train_args)
                args_list.append(train_args)

make_cmd_file(args_list)






















