import argparse
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer, HfArgumentParser
from transformers.trainer_utils import is_main_process
from dataclasses import field, dataclass, asdict
from datasets import load_dataset
from typing import Optional
from peft import get_peft_model
from data_utils import lora_config

def eval_parser():
    """Build the argparse parser used for standalone evaluation runs.

    Returns:
        argparse.ArgumentParser with all string-valued options and their defaults.
    """
    parser = argparse.ArgumentParser(description="arguments")
    option_defaults = {
        # Model-related arguments.
        "model_path": "yourmodel",
        "adapter_path": "checkpoints/mix_cl_a50_e10_l5e-4/checkpoint-200",
        "device": "cuda:0",
        # Data-related arguments.
        "input_file": "./data/test_set_B/Q_B_without_answer.jsonl",
        "output_file": "final_res",
    }
    for option, default in option_defaults.items():
        parser.add_argument(f"--{option}", type=str, default=default)
    return parser

@dataclass()
class Datarguments:
    """Dataset-related configuration for training and evaluation.

    (Class name typo is kept — it is the public interface used by the parsers.)
    """

    # NOTE(review): the *train* split defaults to "test.parquet" — confirm this is intended.
    train_dataset_path: Optional[str] = "test.parquet"
    eval_dataset_path: Optional[str] = "eval.parquet"
    # Number of examples used for evaluation.
    eval_size: Optional[int] = 256
    # Maximum sequence length; presumably applied during tokenization — verify in data pipeline.
    max_length: Optional[int] = 512
    # Number of worker processes for dataset preprocessing.
    num_data_proc: Optional[int] = 16
    # When True, skip the EOS token (exact effect depends on the data pipeline — not visible here).
    skip_eos_token: Optional[bool] = False

@dataclass()
class ModelArgumnets:
    """Model-related configuration.

    (Class name typo is kept — it is the public interface used by the parsers.)
    """

    # Filesystem path (or hub id) of the base model to load.
    model_path: Optional[str] = "/data02/models/CodeLlama-7b-hf"

@dataclass()
class MyTrainingArguments(TrainingArguments):
    """Project default values layered on top of HF ``TrainingArguments``.

    Every field below overrides the upstream default; no new fields are added.
    Plain class-level defaults are equivalent to ``field(default=...)`` for
    these immutable values.
    """

    # Experiment identity and output location.
    run_name: Optional[str] = "codellm"
    output_dir: Optional[str] = "checkpoints/"

    # Per-device batch sizes.
    per_device_train_batch_size: Optional[int] = 4
    per_device_eval_batch_size: Optional[int] = 4

    # Optimization schedule.
    num_train_epochs: Optional[int] = 20
    weight_decay: Optional[float] = 0
    learning_rate: Optional[float] = 1e-7
    lr_scheduler_type: Optional[str] = "cosine"
    warmup_ratio: Optional[float] = 0.1

    # Evaluation cadence.
    eval_strategy: Optional[str] = "steps"
    eval_steps: Optional[int] = 100
    load_best_model_at_end: Optional[bool] = True

    # Logging cadence.
    logging_strategy: Optional[str] = "steps"
    logging_steps: Optional[int] = 1

    # Checkpointing policy.
    save_strategy: Optional[str] = "steps"
    save_steps: Optional[int] = 100
    save_total_limit: Optional[int] = 10
    save_only_model: Optional[bool] = True

    # Mixed precision.
    bf16: Optional[bool] = True

def train_parser():
    """Parse command-line arguments into the three project dataclasses.

    Returns:
        Tuple of (ModelArgumnets, Datarguments, MyTrainingArguments) instances
        populated from the command line by ``HfArgumentParser``.
    """
    hf_parser = HfArgumentParser((ModelArgumnets, Datarguments, MyTrainingArguments))
    model_args, data_args, training_args = hf_parser.parse_args_into_dataclasses()
    return model_args, data_args, training_args