import os
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import sys
import argparse
import yaml
import logging
from pathlib import Path
from tqdm import tqdm

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

from transformers import AutoProcessor, AutoModel
from peft import PeftModel

# --- Dynamically add the project root to sys.path ---
# Preferably set this at the project entry point or via the PYTHONPATH
# environment variable; the original bootstrap logic is kept here.
try:
    # Assume this script lives one level below the project root.
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    if project_root not in sys.path:
        sys.path.insert(0, project_root)
    from src.image_retrieval.models.image_encoder import ImageEncoder
    from src.image_retrieval.data.transforms import build_transform_from_config
    from src.image_retrieval.data.datasets import InferenceDataset, collate_fn
    from utils.proj import proj_data
    from utils.parquet import save_embeddings_to_parquet
# NameError is caught because __file__ may be undefined (e.g. interactive use).
except (ImportError, NameError) as e:
    print(f"Error importing project modules: {e}")
    print("Please ensure the script is run from a location where 'src' and 'utils' are accessible,")
    print("or that the project root is correctly added to PYTHONPATH.")
    sys.exit(1)

def setup_logging(experiment_run_dir, checkpoint_dir):
    """Configure root logging to write both to a per-checkpoint file and to the console.

    The log file name embeds the checkpoint's basename (e.g. ``epoch_41``) so
    runs against different checkpoints do not overwrite each other's logs.
    """
    checkpoint_name = os.path.basename(checkpoint_dir)
    log_file = os.path.join(
        experiment_run_dir, f'embedding_extraction_{checkpoint_name}.log'
    )
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    file_handler = logging.FileHandler(log_file, mode='w')
    console_handler = logging.StreamHandler()
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s',
        handlers=[file_handler, console_handler],
    )

def load_model_and_processor(config, checkpoint_dir, device):
    """Build the image encoder, load LoRA + projection weights, and move it to *device*.

    Args:
        config: Parsed YAML experiment config; reads ``model.model_name`` and
            (optionally) ``model.projection_dim`` (default 256).
        checkpoint_dir: Directory containing the PEFT adapter files and
            ``projection.pt`` (a projection-head state dict).
        device: Target device string/object, e.g. ``"cuda"``.

    Returns:
        (model, processor) — the model is in eval mode on *device*.
    """
    logging.info(f"Loading model on device: {device}")

    model_name = config['model']['model_name']
    # trust_remote_code=True for compatibility with models shipping custom code
    processor = AutoProcessor.from_pretrained(model_name, trust_remote_code=True)
    vision_model = AutoModel.from_pretrained(model_name, trust_remote_code=True).vision_model

    projection_dim = config.get('model', {}).get('projection_dim', 256)
    model = ImageEncoder(vision_model, projection_dim=projection_dim)

    logging.info(f"Loading LoRA adapter and projection head weights from {checkpoint_dir}")
    # Load the LoRA adapter and merge it into the base weights so inference
    # runs without the PEFT wrapper overhead.
    model.base_model = PeftModel.from_pretrained(model.base_model, checkpoint_dir)
    model.base_model = model.base_model.merge_and_unload()
    logging.info("LoRA weights merged for faster inference.")

    # Load the projection head. weights_only=True restricts torch.load to plain
    # tensors (a state dict needs nothing more) and avoids arbitrary-code
    # execution from an untrusted pickle.
    projection_weights_path = os.path.join(checkpoint_dir, 'projection.pt')
    state_dict = torch.load(projection_weights_path, map_location='cpu', weights_only=True)
    model.projection.load_state_dict(state_dict)
    logging.info("Projection head weights loaded.")

    model.to(device)
    model.eval()
    return model, processor

def extract_features_for_directory(image_dir, model, processor, config, args, checkpoint_dir):
    """Run batched inference over every image in *image_dir* and save the
    L2-normalized embeddings to a parquet file.

    Returns the output parquet path, or None when the directory contains no
    usable images.
    """
    device = next(model.parameters()).device
    logging.info(f"Processing directory: {image_dir}")

    # Build the eval-time transform and the dataset over this directory.
    val_transform = build_transform_from_config(config, processor, mode='val')
    dataset = InferenceDataset(image_dir=image_dir, transform=val_transform)
    if len(dataset) == 0:
        logging.warning(f"No images found in {image_dir}, skipping.")
        return

    loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        # num_workers can be raised to speed up preprocessing
        num_workers=config.get('dataloader', {}).get('num_workers', 4),
        collate_fn=collate_fn,
    )

    dir_name = Path(image_dir).name
    logging.info(f"Starting feature extraction for {len(dataset)} images in {dir_name}...")

    embedding_chunks = []
    processed_ids = []
    with torch.no_grad():
        for batch_images, _batch_paths, batch_ids in tqdm(loader, desc=f"Extracting {dir_name}"):
            # collate_fn may yield None when every image in a batch failed to load
            if batch_images is None:
                continue
            feats = model(pixel_values=batch_images.to(device))
            feats = F.normalize(feats, p=2, dim=1)
            embedding_chunks.append(feats.cpu())
            processed_ids.extend(batch_ids)

    if not processed_ids:
        logging.error(f"No valid images were processed in {image_dir}. Aborting this directory.")
        return

    # Derive the output location by mirroring the checkpoint path under
    # outputs/embeddings.
    img_file_name = Path(image_dir).resolve().name
    output_dir = checkpoint_dir.replace('outputs/experiments', 'outputs/embeddings', 1)
    output_path = os.path.join(output_dir, f'{img_file_name}_embeddings.parquet')
    os.makedirs(output_dir, exist_ok=True)

    # Persist the concatenated embeddings.
    final_embeddings = torch.cat(embedding_chunks, dim=0).numpy()
    records = proj_data(img_file_name, processed_ids, final_embeddings)
    save_embeddings_to_parquet(records, output_path)
    logging.info(f"Saved embeddings for {img_file_name} to {output_path}")
    return output_path

def main(args):
    """Drive the full pipeline: resolve the config, set up logging, load the
    model once, then extract embeddings for every requested image directory.

    Returns:
        A list with one entry per directory in ``args.image_dirs`` — the saved
        parquet path, or None when that directory was skipped.
    """
    # --- 1. Paths and configuration ---
    checkpoint_dir = args.checkpoint_dir
    # checkpoints/epoch_N -> two levels up is the experiment run directory
    experiment_run_dir = os.path.dirname(os.path.dirname(checkpoint_dir))

    # Prefer an explicitly supplied config; otherwise auto-discover a YAML file
    # in the run directory. (The original raised-and-caught its own
    # FileNotFoundError; guard-style exits are clearer and no longer conflate
    # "run dir missing" with "config file missing".)
    if args.config:
        config_path = args.config
    else:
        try:
            config_files = [f for f in os.listdir(experiment_run_dir)
                            if f.endswith(('.yml', '.yaml'))]
        except FileNotFoundError as e:
            logging.error(e)
            sys.exit(1)
        if not config_files:
            logging.error(f"在 {experiment_run_dir} 中找不到YAML配置文件。请使用 --config 指定。")
            sys.exit(1)
        config_path = os.path.join(experiment_run_dir, config_files[0])

    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)

    # --- 2. Logging setup ---
    setup_logging(experiment_run_dir, checkpoint_dir)
    logging.info(f"使用配置文件: {config_path}")
    logging.info(f"从检查点加载模型: {checkpoint_dir}")
    logging.info(f"将要处理的目录: {args.image_dirs}")

    # --- 3. Device check and model loading (done once) ---
    if not torch.cuda.is_available():
        logging.error("CUDA is not available. Please run on a machine with a GPU.")
        sys.exit(1)

    device = "cuda"
    model, processor = load_model_and_processor(config, args.checkpoint_dir, device)

    # --- 4. Process every directory with the shared model ---
    save_paths = []
    for image_dir in args.image_dirs:
        save_paths.append(extract_features_for_directory(
            image_dir=image_dir,
            model=model,
            processor=processor,
            config=config,
            args=args,
            checkpoint_dir=args.checkpoint_dir
        ))
        logging.info("-" * 50)  # separator between directories for readability

    logging.info("所有任务完成！")
    return save_paths

# Example command:
# python eval_triple.py \
#     --checkpoint_dir outputs/experiments/cls_triple_base/base/checkpoints/epoch_1 \
#     --image_dirs /home/tfj/datasets/image_retri10k/eval_images_q/ /home/tfj/datasets/image_retri10k/eval_images_v/ /home/tfj/datasets/image_retri10k/eval_images_o/ \
#     --batch_size 128


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="使用训练好的模型高效提取图像特征（单GPU）。")
    parser.add_argument('--checkpoint_dir', type=str, default='outputs/experiments/cls_triple_base_e100/20250620_201447/checkpoints/epoch_41',
                        help='模型检查点目录的路径 (例如, outputs/experiments/exp_name/run_id/checkpoints/epoch_1)。')
    parser.add_argument('--image_dirs', type=str, nargs='+', 
                        default=[ # 顺序qvo
                                '/home/tfj/datasets/image_retri10k/eval_images_q/',
                                 '/home/tfj/datasets/image_retri10k/eval_images_v/',
                                 '/home/tfj/datasets/image_retri10k/eval_images_o/'
                                 ],
                        help='一个或多个包含图像的目录路径，用空格分隔。')
    parser.add_argument('--config', type=str, default=None,
                        help='(可选) 实验配置文件的路径。如果未提供，将自动在检查点父目录中搜索。')
    parser.add_argument('--batch_size', type=int, default=128,  # A100显存很大，可以适当调大batch_size
                        help='推理时的批处理大小。')

    args = parser.parse_args()


    # 运行主程序
    save_paths = main(args)
    q_path,v_path,o_path = save_paths
    # q_path = 'outputs/embeddings/cls_triple_base_e100/20250620_201447/checkpoints/epoch_10/eval_images_q_embeddings.parquet'
    # v_path = 'outputs/embeddings/cls_triple_base_e100/20250620_201447/checkpoints/epoch_10/eval_images_v_embeddings.parquet'
    # o_path = 'outputs/embeddings/cls_triple_base_e100/20250620_201447/checkpoints/epoch_10/eval_images_o_embeddings.parquet'
    from utils.parquet import merge_oR2I_parquet_files,merge_qv2R_parquet_files
    save_r_path = q_path.replace('_q_embeddings','_R_embeddings')
    merge_qv2R_parquet_files(q_path,v_path,save_r_path)
    save_i_path = o_path.replace('_o_embeddings','_I_embeddings')
    merge_oR2I_parquet_files(o_path,save_r_path,save_i_path)
    