import os
os.environ['MASTER_PORT'] = '29501'
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # must be set before torch/CUDA is imported
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, AutoTokenizer, CLIPVisionModel
from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict
from transformers import AutoModelForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers import Trainer, TrainingArguments, DataCollatorWithPadding
from configs.config import ModelConfig, TrainConfig, PathConfig
from transformers import AutoProcessor, AutoModel
from src.GeoCLIP import GeoCLIP
from src.dataset import PretrainMyDataset, MyDataset, MyDataCollator


def main(
    images_path='/data/xiaoyj2025/GeoVLM/Dataset/pretrain_images',
    data_path='/data/xiaoyj2025/GeoVLM/Dataset/chat.json',
    output_dir='save/pretrain_en',
    resume_from_checkpoint='save/pretrain_en/checkpoint-3000',
):
    """Pretrain the GeoCLIP vision-language model with a frozen LLM backbone.

    Builds the model, freezes the LLM parameters (so only the vision/projector
    parts train), constructs the dataset and HF Trainer, then trains and saves.

    Args:
        images_path: Directory containing the pretraining images.
        data_path: Path to the chat-format JSON annotations.
        output_dir: Directory for checkpoints, logs, and the final model.
        resume_from_checkpoint: Checkpoint directory to resume from; if it
            does not exist, training starts from scratch instead of crashing.
    """
    config = ModelConfig()
    model = GeoCLIP(config).to(TrainConfig.device)

    # Freeze the LLM backbone — only the remaining modules (e.g. vision
    # encoder / projection layers) receive gradients during pretraining.
    for param in model.llm_model.parameters():
        param.requires_grad = False

    print(model)
    print(f'模型参数量为：{sum(p.numel() for p in model.parameters())}')
    print(f'模型可训练参数量为：{sum(p.numel() for p in model.parameters() if p.requires_grad)}')

    tokenizer = AutoTokenizer.from_pretrained(config.llm_model, trust_remote_code=True)
    processor = AutoProcessor.from_pretrained(config.vision_model)

    args = TrainingArguments(
        output_dir=output_dir,
        do_train=True,
        per_device_train_batch_size=9,
        learning_rate=1e-4,
        num_train_epochs=2,
        save_steps=1000,
        save_total_limit=2,
        fp16=True,
        gradient_accumulation_steps=16,
        logging_dir=os.path.join(output_dir, 'logs'),
        logging_steps=75,
        report_to='tensorboard',
        dataloader_pin_memory=True,
        dataloader_num_workers=8,
        deepspeed="/data/xiaoyj2025/GeoVLM/configs/ds_config.json"
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=PretrainMyDataset(images_path, data_path, tokenizer, processor, config),
        data_collator=MyDataCollator(tokenizer)
    )

    # Resume only when the checkpoint actually exists; a hardcoded resume path
    # otherwise makes every fresh run crash inside Trainer.train().
    if resume_from_checkpoint and os.path.isdir(resume_from_checkpoint):
        trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    else:
        trainer.train()

    trainer.save_model(os.path.join(output_dir, 'pretrain_en'))
    trainer.save_state()


# Script entry point: run pretraining only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
