from configparser import ConfigParser
from transformers import TrainingArguments
from dataclasses import dataclass
from typing import Optional

# Model-related arguments.
@dataclass
class ModelArguments:
    """Arguments describing which pretrained model to load."""

    # Hugging Face Hub id or local filesystem path of the base model.
    pretrained_model_name_or_path: Optional[str] = "codellama/CodeLlama-7b-hf"

# Data-processing related arguments.
@dataclass
class DataArguments:
    """Arguments controlling dataset locations and preprocessing."""

    # Paths to the train/eval datasets; presumably parquet files — confirm with the data-loading code.
    train_dataset_path: Optional[str] = "test.parquet"
    eval_dataset_path: Optional[str] = "eval.parquet"
    # Number of examples used for evaluation.
    eval_size: Optional[int] = 256
    # Maximum sequence length after tokenization.
    max_length: Optional[int] = 512
    # Number of worker processes for dataset preprocessing.
    num_data_proc: Optional[int] = 16
    # Whether to skip appending the EOS token during preprocessing.
    skip_eos_token: Optional[bool] = False

# Training-related arguments (extends Hugging Face TrainingArguments).
@dataclass
class MyTrainingArguments(TrainingArguments):
    """Project training defaults layered on top of ``transformers.TrainingArguments``.

    Only fields whose defaults differ from upstream are redeclared here;
    everything else is inherited unchanged.
    """

    run_name: Optional[str] = "atom"
    output_dir: Optional[str] = "../../checkpoints/"
    per_device_train_batch_size: Optional[int] = 4
    per_device_eval_batch_size: Optional[int] = 4
    # Upstream declares num_train_epochs as float; keep it float so that
    # fractional epoch counts parse correctly (e.g. via HfArgumentParser).
    num_train_epochs: Optional[float] = 20.0
    weight_decay: Optional[float] = 0.0
    learning_rate: Optional[float] = 1e-7
    lr_scheduler_type: Optional[str] = "cosine"
    warmup_ratio: Optional[float] = 0.1
    # Evaluate / log / save every N steps (strategy "steps").
    eval_strategy: Optional[str] = "steps"
    eval_steps: Optional[int] = 100
    logging_strategy: Optional[str] = "steps"
    logging_steps: Optional[int] = 1
    save_strategy: Optional[str] = "steps"
    save_steps: Optional[int] = 100
    # Keep at most 10 checkpoints; save model weights only (no optimizer state).
    save_total_limit: Optional[int] = 10
    save_only_model: Optional[bool] = True
    bf16: Optional[bool] = True

def load_args_from_ini(ini_file: str):
    """Load model, data and training arguments from an INI file.

    Any missing section or option falls back to the defaults below. Note
    that ``ConfigParser.read`` silently ignores files it cannot open, so a
    nonexistent ``ini_file`` yields a triple built entirely from defaults.

    Args:
        ini_file: Path to an INI file with optional ``[model]``, ``[data]``
            and ``[training]`` sections.

    Returns:
        A ``(ModelArguments, DataArguments, MyTrainingArguments)`` tuple.
    """
    config = ConfigParser()
    config.read(ini_file)

    model_args = ModelArguments(
        pretrained_model_name_or_path=config.get(
            "model",
            "pretrained_model_name_or_path",
            fallback="codellama/CodeLlama-7b-hf",
        )
    )

    data_args = DataArguments(
        train_dataset_path=config.get("data", "train_dataset_path", fallback="test.parquet"),
        eval_dataset_path=config.get("data", "eval_dataset_path", fallback="eval.parquet"),
        eval_size=config.getint("data", "eval_size", fallback=256),
        max_length=config.getint("data", "max_length", fallback=512),
        num_data_proc=config.getint("data", "num_data_proc", fallback=16),
        skip_eos_token=config.getboolean("data", "skip_eos_token", fallback=False),
    )

    training_args = MyTrainingArguments(
        run_name=config.get("training", "run_name", fallback="atom"),
        output_dir=config.get("training", "output_dir", fallback="../../checkpoints/"),
        per_device_train_batch_size=config.getint("training", "per_device_train_batch_size", fallback=4),
        per_device_eval_batch_size=config.getint("training", "per_device_eval_batch_size", fallback=4),
        # getfloat (not getint): transformers declares num_train_epochs as a
        # float, so fractional epoch counts in the INI file must be accepted.
        num_train_epochs=config.getfloat("training", "num_train_epochs", fallback=20.0),
        weight_decay=config.getfloat("training", "weight_decay", fallback=0.0),
        learning_rate=config.getfloat("training", "learning_rate", fallback=1e-7),
        lr_scheduler_type=config.get("training", "lr_scheduler_type", fallback="cosine"),
        warmup_ratio=config.getfloat("training", "warmup_ratio", fallback=0.1),
        eval_strategy=config.get("training", "eval_strategy", fallback="steps"),
        eval_steps=config.getint("training", "eval_steps", fallback=100),
        logging_strategy=config.get("training", "logging_strategy", fallback="steps"),
        logging_steps=config.getint("training", "logging_steps", fallback=1),
        save_strategy=config.get("training", "save_strategy", fallback="steps"),
        save_steps=config.getint("training", "save_steps", fallback=100),
        save_total_limit=config.getint("training", "save_total_limit", fallback=10),
        save_only_model=config.getboolean("training", "save_only_model", fallback=True),
        bf16=config.getboolean("training", "bf16", fallback=True),
    )

    print(model_args)
    print(data_args)
    print(training_args)
    return model_args, data_args, training_args

# Example usage — load the arguments and print them:
# model_args, data_args, training_args = load_args_from_ini("train_config.ini")
