# -*- coding: utf-8 -*-
# @Time    : 2025/2/11 19:08
# @Author  : 
# @File    : config.py
# @Software: PyCharm 
# @Comment :

import torch

class Config:
    """Static configuration for LoRA fine-tuning of a code LLM.

    All settings are class attributes (no instantiation needed): checkpoint
    and dataset locations, LoRA adapter hyperparameters, and training /
    evaluation parameters consumed by the Trainer.
    """

    # --- Model paths ---
    pretrained_model_base_path = './pretrained_models'  # base dir for downloaded checkpoints
    lora_model_base_path = './lora_models'              # base dir for saved LoRA adapters
    model_name = 'Qwen/Qwen2.5-Coder-7B-Instruct'
    # Full paths are derived from the base dirs above plus the model name.
    pretrained_model_path = f'{pretrained_model_base_path}/{model_name}'
    lora_model_path = f'{lora_model_base_path}/{model_name}'

    # --- Dataset paths ---
    raw_datasets_base_path = "./raw_datasets"
    processed_datasets_base_path = "./processed_datasets"
    dataset_name = "emilgoh/verilog-dataset-v3"
    raw_datasets_path = f"{raw_datasets_base_path}/{dataset_name}"
    processed_datasets_path = f"{processed_datasets_base_path}/{dataset_name}"

    # --- LoRA hyperparameters ---
    # Attention and MLP projection layers that receive LoRA adapters.
    target_modules = [
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ]

    lora_rank = 16
    lora_alpha = 32
    lora_dropout = 0.1

    # --- Training hyperparameters ---
    epochs = 3
    learning_rate = 5e-5
    # NOTE: when gradient accumulation is enabled, step-based intervals
    # (eval/save) count *optimizer* steps, not micro-batches. For example,
    # with batch_size=32 and gradient_accumulation_steps=8, an eval interval
    # of 10 steps means the Trainer evaluates every 8 * 10 = 80 micro-batches.
    gradient_accumulation_steps = 2
    batch_size = 12
    logging_steps = 20
    save_step = 100
    eval_step = 100
    torch_dtype = torch.float16  # half precision for training/inference

    # --- swanlab experiment tracking ---
    swanlab_api_key = "<your swanlab_api_key>"

    # --- Evaluation ---
    eval_lora = True   # evaluate the LoRA-adapted model rather than the base model
    eval_batch = 12
