import os
import json
import torch
import clip
import numpy as np
import faiss
from PIL import Image
from tqdm import tqdm
import glob

# Path configuration
BASE_DIR = "E:/2025summerJINLIUelectirc/ElecSolutionBB"
# Dataset of power-line foreign-object images (directory name is Chinese)
DATA_DIR = os.path.join(BASE_DIR, "data", "导线异物数据集")
VECTORDB_DIR = os.path.join(BASE_DIR, "VectorDB", "PowerLineVectorDB")
os.makedirs(VECTORDB_DIR, exist_ok=True)

# Load the CLIP model (ViT-B/32), preferring GPU when available
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)


def extract_features(image_path, conversations):
    """Encode an image and its conversation-derived description with CLIP.

    Args:
        image_path: Path to the image file on disk.
        conversations: List of Q/A dicts (each with a "value" key) used to
            build the text description.

    Returns:
        Tuple of (image_features, text_features, description) where the
        features are numpy arrays of shape (1, dim); (None, None, None)
        when the image cannot be read or encoded.
    """
    # Process the image
    try:
        # Context manager releases the file handle promptly; convert("RGB")
        # guards against grayscale/palette images.
        with Image.open(image_path) as img:
            img_tensor = preprocess(img.convert("RGB")).unsqueeze(0).to(device)
        with torch.no_grad():
            image_features = model.encode_image(img_tensor)
    except Exception as e:
        print(f"Error processing image {image_path}: {e}")
        return None, None, None

    # Process the text: build a descriptive sentence from the Q/A pairs.
    # truncate=True keeps tokenization from raising on descriptions longer
    # than CLIP's 77-token context window.
    text_description = generate_description(conversations)
    text = clip.tokenize([text_description], truncate=True).to(device)
    with torch.no_grad():
        text_features = model.encode_text(text)

    return image_features.cpu().numpy(), text_features.cpu().numpy(), text_description


def generate_description(conversations):
    """Build a descriptive text from alternating question/answer entries.

    Entries are paired positionally: even indices are questions, the
    following odd index is the matching answer. A trailing question with
    no answer is ignored, and location questions contribute nothing.
    """
    fragments = []
    questions = conversations[0::2]
    answers = conversations[1::2]
    for q_entry, a_entry in zip(questions, answers):
        question = q_entry["value"]
        answer = a_entry["value"]
        if "是否存在异物" in question:
            fragments.append(f"异物检测: {answer}. ")
        elif "异物是什么" in question or "部件是什么" in question:
            fragments.append(f"类型: {answer}. ")
        elif "位置" in question:
            # Location details are intentionally left out of the description.
            continue
    return "".join(fragments).strip()


def _extract_image_filename(conversations):
    """Return the first "<name>.jpg" filename referenced in a conversation.

    Handles both "/" and "\\" path separators (the project runs on Windows,
    so backslash-separated paths may appear in conversation text).
    Returns None when no ".jpg" reference is found.
    """
    for conv in conversations:
        value = conv.get("value", "")
        if ".jpg" in value:
            # Text before the first ".jpg" ends with the file stem; normalize
            # separators and keep only the final path component.
            stem = value.split(".jpg")[0].replace("\\", "/")
            return stem.split("/")[-1] + ".jpg"
    return None


def build_vector_db(json_data):
    """Build a dual (image + text) FAISS vector database from training items.

    For each item, locates the referenced .jpg in DATA_DIR, encodes the image
    and a generated text description with CLIP, then writes two inner-product
    FAISS indexes over L2-normalized vectors (i.e. cosine similarity) and a
    metadata.json into VECTORDB_DIR.

    Args:
        json_data: List of dicts, each with a "conversations" list of Q/A
            dicts and optionally an "id".

    Returns:
        Number of items successfully indexed (0 when nothing was processed).
    """
    image_features_list = []
    text_features_list = []
    metadata = []
    skipped_items = 0

    # Map bare filenames to full paths once, for O(1) lookup per item.
    image_files = glob.glob(os.path.join(DATA_DIR, "*.jpg"))
    print(f"Found {len(image_files)} images in dataset directory")
    image_file_map = {os.path.basename(f): f for f in image_files}

    for item in tqdm(json_data, desc="Processing items"):
        # Recover the image filename mentioned inside the conversation text.
        img_filename = _extract_image_filename(item["conversations"])
        if not img_filename:
            print(f"Skipping item {item.get('id', 'unknown')} - image filename not found")
            skipped_items += 1
            continue

        # Resolve the filename against the files actually present on disk.
        img_path = image_file_map.get(img_filename)
        if not img_path:
            print(f"Skipping item {item.get('id', 'unknown')} - image not found: {img_filename}")
            skipped_items += 1
            continue

        img_feat, text_feat, description = extract_features(img_path, item["conversations"])
        if img_feat is None or text_feat is None:
            skipped_items += 1
            continue

        image_features_list.append(img_feat)
        text_features_list.append(text_feat)
        metadata.append({
            "id": item.get("id", "unknown"),
            "image_path": img_path,
            "description": description,
            "conversations": item["conversations"],
        })

    if not image_features_list:
        print("No valid items processed. Exiting.")
        return 0

    # Stack per-item (1, dim) features into (n, dim) float32 matrices.
    image_features = np.vstack(image_features_list).astype('float32')
    text_features = np.vstack(text_features_list).astype('float32')

    # Normalize BEFORE adding so IndexFlatIP scores equal cosine similarity.
    faiss.normalize_L2(image_features)
    faiss.normalize_L2(text_features)

    image_index = faiss.IndexFlatIP(image_features.shape[1])
    text_index = faiss.IndexFlatIP(text_features.shape[1])
    image_index.add(image_features)
    text_index.add(text_features)

    # Persist both indexes and the item metadata next to each other.
    faiss.write_index(image_index, os.path.join(VECTORDB_DIR, "image_index.faiss"))
    faiss.write_index(text_index, os.path.join(VECTORDB_DIR, "text_index.faiss"))
    with open(os.path.join(VECTORDB_DIR, "metadata.json"), "w", encoding="utf-8") as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)

    print(f"Skipped {skipped_items} items due to errors")
    return len(metadata)


# 主执行函数
# Script entry point: load the training set and build the vector database.
if __name__ == "__main__":
    train_data_path = "train_set.json"
    if not os.path.exists(train_data_path):
        print(f"Error: Training data file not found at {train_data_path}")
        # SystemExit is always available; exit() is a site-module convenience
        # that may be absent (e.g. when run with -S).
        raise SystemExit(1)

    try:
        with open(train_data_path, "r", encoding="utf-8") as f:
            train_data = json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        # Only file-access and JSON-parse failures are expected here;
        # anything else should surface with a full traceback.
        print(f"Error loading training data: {e}")
        raise SystemExit(1)

    # Build the vector database from the loaded training items.
    print("Starting vector database construction...")
    num_items = build_vector_db(train_data)
    print(f"Vector database built successfully with {num_items} items!")
    print(f"Saved to: {VECTORDB_DIR}")