# File: train.py
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1' # No longer necessary when launching via `accelerate launch`
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
import argparse
import yaml
import torch
import time
from torch.utils.data import DataLoader
from transformers import AutoProcessor, AutoModel
from peft import LoraConfig, get_peft_model
from tqdm import tqdm
import logging
from accelerate import Accelerator
from accelerate.logging import get_logger
import shutil

# Import the refactored project modules
from src.image_retrieval.data.datasets import TripletDataset
from src.image_retrieval.data.transforms import build_transform_from_config
from src.image_retrieval.models.image_encoder import ImageEncoder

def main(config, args):
    """Fine-tune an image encoder with LoRA using a triplet-margin objective.

    Args:
        config: Parsed YAML configuration dict. Expected sections: ``model``
            (``model_name``, optional ``projection_dim``), ``lora`` (``r``,
            ``lora_alpha``, ``target_modules``, ``lora_dropout``),
            ``dataloader`` (``root``, ``num_workers``), ``train``
            (``batch_size``, ``learning_rate``, ``margin``, ``num_epochs``),
            and optional ``exp_name``.
        args: argparse namespace; only ``args.config`` (path to the YAML
            file) is used, to copy the config into the experiment directory.

    Side effects: creates ``outputs/experiments/<exp_name>/<timestamp>/``
    with a train.log, a copy of the config, TensorBoard event files, and
    per-epoch checkpoints (LoRA adapter + projection head weights).
    """
    # --- 0. SETUP ACCELERATOR & LOGGING ---
    now = time.strftime("%Y%m%d_%H%M%S", time.localtime())
    exp_name = config.get('exp_name', 'default_experiment')
    save_dir = os.path.join('outputs/experiments', exp_name, now)

    # Initialize Accelerator with a TensorBoard tracker rooted at save_dir.
    accelerator = Accelerator(log_with="tensorboard", project_dir=save_dir)
    accelerator.init_trackers(exp_name)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger = get_logger(__name__)
    logger.info(accelerator.state, main_process_only=False)

    # Bound on every rank so the epoch-end flush below can never raise a
    # NameError; only the main process ever assigns a real tracker.
    tensorboard_tracker = None
    if accelerator.is_main_process:
        os.makedirs(save_dir, exist_ok=True)
        # --- Mirror all log records to a file in the experiment dir ---
        log_file_path = os.path.join(save_dir, "train.log")
        file_handler = logging.FileHandler(log_file_path)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s - %(message)s")
        file_handler.setFormatter(formatter)
        # Attach to the root logger so output from all modules is captured.
        logging.getLogger().addHandler(file_handler)
        # --------------------------------

        # Copy the config file into the experiment dir for reproducibility.
        shutil.copy(args.config, os.path.join(save_dir, os.path.basename(args.config)))
        # Grab the TensorBoard tracker so it can be flushed every epoch.
        # BUGFIX: Accelerator.get_tracker raises ValueError (not KeyError)
        # when the tracker is missing, so catch ValueError here.
        try:
            tensorboard_tracker = accelerator.get_tracker("tensorboard")
        except ValueError:
            logger.warning("TensorBoard tracker not found. Logs will not be written to TensorBoard.")

    # --- 1. Initialize model, processor, and augmentations ---
    model_name = config['model']['model_name']

    processor = AutoProcessor.from_pretrained(model_name)
    vision_model = AutoModel.from_pretrained(model_name).vision_model

    projection_dim = config.get('model', {}).get('projection_dim', 256)
    model = ImageEncoder(vision_model, projection_dim=projection_dim)

    train_transform = build_transform_from_config(config, processor, mode='train')
    logger.info(f"Built training transforms: {train_transform}", main_process_only=True)

    # --- 2. Configure LoRA ---
    lora_config = LoraConfig(
        r=int(config['lora']['r']),
        lora_alpha=int(config['lora']['lora_alpha']),
        target_modules=config['lora']['target_modules'],
        lora_dropout=float(config['lora']['lora_dropout']),
    )
    # Only the backbone is LoRA-wrapped; the projection head stays fully trainable.
    model.base_model = get_peft_model(model.base_model, lora_config)
    if accelerator.is_main_process:
        model.base_model.print_trainable_parameters()

    # --- 3. Prepare data and training components ---
    train_dataset = TripletDataset(
        root=config['dataloader']['root'],
        transform=train_transform
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=int(config['train']['batch_size']),
        shuffle=True,
        num_workers=int(config['dataloader']['num_workers'])
    )

    optimizer = torch.optim.AdamW(model.parameters(), lr=float(config['train']['learning_rate']))
    triplet_loss = torch.nn.TripletMarginLoss(margin=float(config['train']['margin']), p=2)

    model, optimizer, train_loader = accelerator.prepare(
        model, optimizer, train_loader
    )

    # --- 4. Training loop ---
    num_epochs = int(config['train']['num_epochs'])
    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {num_epochs}")
    logger.info(f"  Instantaneous batch size per device = {config['train']['batch_size']}")
    logger.info(f"  Total train batch size (w. parallel, distributed) = {int(config['train']['batch_size']) * accelerator.num_processes}")

    global_step = 0
    for epoch in range(num_epochs):
        model.train()
        total_loss = 0

        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", disable=not accelerator.is_local_main_process)

        for batch in progress_bar:
            anchor_imgs, positive_imgs, negative_imgs = batch

            # Run all three triplet members through the encoder in one
            # forward pass, then split the embeddings back apart.
            all_images = torch.cat([anchor_imgs, positive_imgs, negative_imgs], dim=0)

            outputs = model(pixel_values=all_images)

            batch_size = anchor_imgs.size(0)
            anchor_embed, positive_embed, negative_embed = outputs[:batch_size], outputs[batch_size:2*batch_size], outputs[2*batch_size:]

            loss = triplet_loss(anchor_embed, positive_embed, negative_embed)

            total_loss += loss.detach().float()

            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

            # Per-step loss is already surfaced via the progress bar and
            # TensorBoard; a per-step logger.info would flood train.log.
            progress_bar.set_postfix(loss=loss.item())
            if accelerator.is_main_process:
                accelerator.log({"train_loss": loss.item()}, step=global_step)

            global_step += 1

        # NOTE(review): this is the main process's local average only; it is
        # not gathered across ranks in distributed runs.
        avg_loss = (total_loss / len(train_loader)).item()
        logger.info(f"Epoch {epoch+1} finished. Average Loss: {avg_loss:.4f}")

        if accelerator.is_main_process:
            accelerator.log({"epoch_avg_loss": avg_loss}, step=epoch)
            # Flush once per epoch so scalars reach disk promptly.
            if tensorboard_tracker:
                tensorboard_tracker.writer.flush()

        # --- 5. Save checkpoints ---
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            epoch_save_dir = os.path.join(save_dir, 'checkpoints', f"epoch_{epoch+1}")
            os.makedirs(epoch_save_dir, exist_ok=True)

            # Save the LoRA adapter and the projection head separately.
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.base_model.save_pretrained(epoch_save_dir)
            torch.save(unwrapped_model.projection.state_dict(), os.path.join(epoch_save_dir, 'projection.pt'))
            logger.info(f"Model weights for epoch {epoch+1} saved to {epoch_save_dir}")

    accelerator.end_training()
    logger.info("Training complete.")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train with LoRA and YAML config file.")
    parser.add_argument('--config', type=str, default='configs/cls_triple.yml', help='Path to YAML config file')
    args = parser.parse_args()
    # Read the config explicitly as UTF-8 so parsing does not depend on the
    # platform's locale default encoding (configs may contain non-ASCII text).
    with open(args.config, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    main(config, args)