from dataclasses import dataclass
from pathlib import Path
from typing import ClassVar

import torch
from transformers import PreTrainedModel, PretrainedConfig, AutoTokenizer, AutoModelForCausalLM


class PathConfig:
    """Central registry of filesystem locations used by the project.

    Every attribute is class-level; directories are resolved relative to the
    repository root, taken to be two levels above this file.
    """

    # Repository root: two directories above this config module.
    project_root: Path = Path(__file__).parent.parent

    # Model identifiers — currently blank placeholders.
    vision_model: str = " "
    llm_model: str = " "

    # Dataset locations (shared Dataset/ root extracted for clarity).
    _dataset_root = project_root / "Dataset"
    dataset_dir: Path = _dataset_root / "chat-translated.json"
    image_dir: Path = _dataset_root / "pretrain_images"

    # Output locations.
    checkpoints_dir: Path = project_root / "checkpoints"
    log_dir: Path = project_root / "logs"
    export_dir: Path = project_root / "model"


class ModelConfig(PretrainedConfig):
    """Configuration for the vision-language model.

    Holds the paths of the vision and LLM backbones plus LoRA fine-tuning
    hyperparameters, layered on top of the Hugging Face ``PretrainedConfig``
    machinery (``model_type`` enables registration/serialization).
    """

    model_type = "vlm_model"

    def __init__(self,
                 vision_model_path='/data/xiaoyj2025/GeoVLM/models/siglip2-so400m-patch14-384',
                 llm_model_path='/data/xiaoyj2025/GeoVLM/models/Qwen2.5-1.5B-Instruct',
                 freeze_vision_model=True,
                 image_pad_num=81,
                 lora_rank=64,
                 lora_alpha=128,
                 lora_target_modules=None,
                 lora_dropout=0.1,
                 **kwargs):
        """Build the config.

        Args:
            vision_model_path: local path (or hub id) of the vision encoder.
            llm_model_path: local path (or hub id) of the language model.
            freeze_vision_model: whether the vision encoder is frozen.
            image_pad_num: number of image placeholder tokens.
            lora_rank / lora_alpha / lora_dropout: LoRA hyperparameters.
            lora_target_modules: module names LoRA is applied to; defaults to
                Qwen-specific modules when None.
            **kwargs: forwarded to ``PretrainedConfig``.
        """
        self.vision_model = vision_model_path
        self.llm_model = llm_model_path
        self.freeze_vision_model = freeze_vision_model
        self.image_pad_num = image_pad_num
        self.lora_rank = lora_rank
        self.lora_alpha = lora_alpha
        # Fix: the original used a mutable list literal as the default argument,
        # which is shared across every instance; use a None sentinel instead and
        # build a fresh list per call.
        if lora_target_modules is None:
            lora_target_modules = ["q_proj", "k_proj", "w2"]  # Qwen-specific modules
        self.lora_target_modules = lora_target_modules
        self.lora_dropout = lora_dropout
        super().__init__(**kwargs)


@dataclass
class TrainConfig:
    batch_size: int = 2
    grad_accum: int = 4
    lr: float = 2e-5
    grad_accum_steps: int = 8  # 梯度累积步数（有效批次=batch_size*grad_accum）
    num_epochs: int = 10  # 总训练轮次
    learning_rate: float = 2e-5  # 初始学习率
    weight_decay: float = 0.01  # 权重衰减
    freeze_vision_model: bool = True  # 冻结视觉编码器
    mixed_precision: bool = True  # 启用混合精度训练
    max_seq_len: int = 256  # 最大序列长度
    image_pad_num: int = 81  # 图像占位符大小
    warmup_ratio: float = 0.1  # warmup步数比例
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    projection_pretrain_epochs = 10  # 投影层预训练轮次
    pre_batch_size = 2  # 可增大批次
    pre_learning_rate = 2e-5
    pre_warmup_steps = 500  # 学习率预热步数
    pre_max_seq_len = 512  # 序列长度
