import platform
from dataclasses import dataclass, field
from typing import Optional

# Prefer FlashAttention-2 when the flash_attn package is importable; otherwise
# fall back to the default "eager" attention implementation.  A broad
# Exception is caught deliberately: importing flash_attn can fail with errors
# other than ImportError (e.g. missing CUDA libraries).
attn_implementation = "flash_attention_2"
try:
    from flash_attn import flash_attn_func  # noqa: F401 -- capability probe only
except Exception:
    attn_implementation = "eager"

# Pre-training corpora (all under the mounted Google Drive datasets folder):
# a Finnish wiki dump, two Chinese baike chunk files, and two Java code dumps.
PRE_TRAIN_TRAIN_FILES = [
    '/content/drive/MyDrive/Model/datasets/' + name
    for name in (
        'wiki_fi.parquet',
        'baike_chunk_512_5.6M_0.parquet',
        'baike_chunk_512_5.6M_1.parquet',
        'java_data1.jsonl',
        'java_data2.jsonl',
    )
]

# Held-out evaluation set used during pre-training.
PRE_TRAIN_EVAL_FILE = "/content/drive/MyDrive/Model/datasets/pretrain_eval_512_1w.parquet"

@dataclass
class PretrainArguments:
    """Paths and hyper-parameters for the pre-training stage."""

    # Directory holding the (Qwen) tokenizer files.
    tokenizer_dir: Optional[str] = field(default="./qwen/")
    # Where pre-training checkpoints are written.
    model_save_dir: Optional[str] = field(
        default="/content/drive/MyDrive/Model/save/pretrain/")
    # Log output directory.
    logs_dir: Optional[str] = field(
        default="/content/drive/MyDrive/Model/log/")
    # Training corpora; default_factory so instances never share one list.
    train_files: list = field(default_factory=lambda: PRE_TRAIN_TRAIN_FILES)
    # Evaluation dataset.  NOTE(review): this field was previously declared
    # twice; the redundant un-annotated duplicate has been removed.
    eval_file: Optional[str] = field(default=PRE_TRAIN_EVAL_FILE)
    # Maximum sequence length (tokens) per sample.
    max_seq_len: Optional[int] = field(default=512)

    # flash_attention_2 is unavailable on Windows, so use the default
    # "eager" attention implementation there.
    attn_implementation: str = (
        "eager" if platform.system() == "Windows" else attn_implementation
    )

@dataclass
class SFTArguments:
    """Paths and limits for the supervised fine-tuning (SFT) stage."""

    # Candidate SFT datasets (Windows-style relative paths).
    SFT_FILES = [
        "datasets\\sft\\aplca.parquet",
        "datasets\\sft\\bell.parquet",
        "datasets\\sft\\r1.parquet",
        "datasets\\sft\\train-00000-of-00001.parquet",
    ]

    # Tokenizer is loaded from the pre-trained model directory.
    tokenizer_dir: Optional[str] = "model_save\\pretrain"
    # Checkpoint to initialize SFT from.
    sft_from_checkpoint_file: Optional[str] = "model_save\\pretrain"
    # Output directory for fine-tuned checkpoints.
    model_save_dir: Optional[str] = "model_save\\sft"
    # Maximum sequence length (tokens) per sample.
    max_seq_len: Optional[int] = 512


@dataclass
class TrainingArguments:
    """
    Configuration for training the model (field names mirror the
    HuggingFace ``TrainingArguments`` they are forwarded to).
    """

    per_device_train_batch_size: Optional[int] = field(default=24)
    per_device_eval_batch_size: Optional[int] = field(default=4)
    num_train_epochs: Optional[int] = field(default=100)
    gradient_accumulation_steps: Optional[int] = field(default=10)
    weight_decay: Optional[float] = field(default=0.1)
    log_level: Optional[str] = field(default='info')
    logging_steps: Optional[int] = field(default=20)
    ddp_find_unused_parameters: Optional[bool] = field(default=False)
    warmup_steps: Optional[int] = field(default=0)
    # Declared exactly once (was previously duplicated, with a copy-pasted
    # "Learning rate." help string on the first copy).
    lr_scheduler_type: Optional[str] = field(
        default='cosine', metadata={"help": "Learning-rate scheduler type."})
    learning_rate: Optional[float] = field(default=1e-4)
    evaluation_strategy: Optional[str] = field(default='steps')
    eval_steps: Optional[int] = field(default=100)
    save_steps: Optional[int] = field(default=50)
    save_strategy: Optional[str] = field(default='steps')
    save_total_limit: Optional[int] = field(default=4)
    report_to: Optional[str] = field(default='tensorboard')
    optim: Optional[str] = field(default='adamw_torch')
    bf16: Optional[bool] = field(default=True)
    logging_first_step: Optional[bool] = field(default=True)

@dataclass
class GenegrateArguments:
    """Decoding settings for text generation with the DPO-trained model."""

    # Model weights and tokenizer both live in the DPO output directory.
    model_dir: Optional[str] = 'model_save\\dpo'
    tokenizer_dir: Optional[str] = 'model_save\\dpo'
    # Where generated samples are saved.
    save_samples_path: Optional[str] = 'sample'

    # Sampling parameters.
    temperature: Optional[float] = 0.3
    top_k: Optional[int] = 20
    top_p: Optional[float] = 0.5
    do_sample: Optional[bool] = True
    num_beams: Optional[int] = 1
    repetition_penalty: Optional[float] = 1.1
    max_new_tokens: Optional[int] = 300

@dataclass
class DpoConfig:
    """Configuration for DPO (Direct Preference Optimization) training."""

    max_seq_len: Optional[int] = field(default=1024 + 8)  # 8 extra for eos token
    # Path of the SFT-trained model; tokenizer usually ships in the same folder.
    sft_model_file: Optional[str] = field(default='model_save\\model')
    tokenizer_dir: Optional[str] = field(default='model_save\\model')
    # DPO training set.
    dpo_train_file: Optional[str] = field(default=r'D:\code\python\law_work\MINILLM\MINI_LLM\datasets\final_dataset\my_dpo_train.json')
    # DPO evaluation set.  NOTE(review): defaults to the SAME file as the
    # training set -- presumably a placeholder; confirm before real eval runs.
    dpo_eval_file: Optional[str] = field(default=r'D:\code\python\law_work\MINILLM\MINI_LLM\datasets\final_dataset\my_dpo_train.json')

    adapter_file: Optional[str] = field(default='/data/dpo/adapter_model.safetensors')
    # Raw string: avoids SyntaxWarning for invalid escape sequences (\c, \l, ...)
    # in Windows paths; the value is unchanged.
    log_dir: Optional[str] = field(default=r'D:\code\python\law_work\MINILLM\MINI_LLM\logs')

    per_device_train_batch_size: Optional[int] = field(default=4)
    num_train_epochs: Optional[int] = field(default=4)
    gradient_accumulation_steps: Optional[int] = field(default=8)
    learning_rate: Optional[float] = field(default=1e-5)
    logging_first_step: Optional[bool] = field(default=True)
    logging_steps: Optional[int] = field(default=20)
    save_steps: Optional[int] = field(default=200)
    # Output directory for the DPO model (raw string for the same reason as log_dir).
    output_dir: Optional[str] = field(default=r'D:\code\python\law_work\MINILLM\MINI_LLM/dpo')
    warmup_steps: Optional[int] = field(default=1000)
    seed: Optional[int] = field(default=23333)
    fp16: Optional[bool] = field(default=True)
    beta: Optional[float] = field(default=0.1)  # DPO KL-penalty coefficient beta


@dataclass
class HumanEvalArguments:
    """
    Configuration for running evaluation on HumanEval dataset.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    # Help text fixed: it previously duplicated the seed field's description.
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Path of the JSON file where evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
