import glob
import os

import chromadb
import requests
from chromadb import Client, Settings
from chromadb.utils import embedding_functions

from vllm_models.qwen_chat_model import Qwen2ChatModel
from vllm_models.qwen_vl_model import Qwen2VLModel
from ipex_models.qwen2_vl_chat_xpu import Qwen2VLChatModel


from chromadb.utils import embedding_functions

class OpenAIEmbeddingFunction(embedding_functions.EmbeddingFunction):
    """Embedding function backed by an OpenAI-compatible ``/v1/embeddings`` endpoint.

    Posts a single text to the configured embedding server (e.g. an Ollama
    instance serving ``bge-m3``) and returns its embedding vector.
    """

    def __init__(self, model_name="bge-m3",
                 embedding_url="http://127.0.0.1:11434/v1/embeddings"):
        # Name of the embedding model exposed by the server.
        self.model_name = model_name
        # Full URL of the OpenAI-compatible embeddings endpoint.
        self.embedding_url = embedding_url
        # Cap on input length (characters); longer texts are truncated
        # before the request is sent.
        self.max_text_length = 768

    def __call__(self, text):
        """Return the embedding vector (list of floats) for *text*.

        Raises:
            requests.HTTPError: if the server answers with an error status.
            requests.Timeout: if the server does not answer within 60 s.
        """
        headers = {
            "Content-Type": "application/json",
            # The local server ignores auth, but the OpenAI API shape
            # expects the header to be present (plain string — the old
            # f-string had no placeholder).
            "Authorization": "Bearer Empty",
        }
        # Truncate over-long input to exactly the configured cap (the old
        # code cut to max_text_length - 1 characters — off by one).
        if len(text) > self.max_text_length:
            text = text[:self.max_text_length]
        # A timeout keeps the pipeline from hanging forever on a dead server.
        response = requests.post(self.embedding_url,
                                 headers=headers,
                                 json={"model": self.model_name, "input": [text]},
                                 timeout=60)
        # Fail loudly on HTTP errors instead of raising an opaque KeyError
        # when the error body has no "data" field.
        response.raise_for_status()
        return response.json()["data"][0]["embedding"]


# 1. 遍历目录读取所有的图片文件
def get_image_files(directory, extensions=('.jpg', '.jpeg', '.png', '.bmp')):
    """Return paths of all files in *directory* with one of the given suffixes.

    Args:
        directory: Directory to scan (non-recursive).
        extensions: Iterable of file suffixes to match. The default is a
            tuple rather than a list so the shared default object cannot
            be mutated between calls.

    Returns:
        List of matching file paths, grouped by extension in the order the
        extensions are given.
    """
    image_files = []
    for ext in extensions:
        image_files.extend(glob.glob(os.path.join(directory, f'*{ext}')))
    return image_files


# 2. 调用vl_model描述图片
def describe_image(vl_model, image_path):
    """Ask the vision-language model for a multi-dimensional image description.

    A fixed Chinese prompt instructs the model to cover the image's subject,
    people, scene type, image category, notable details, and an overall
    impression.

    Args:
        vl_model: Object exposing ``describe_image(image_path, prompt)``.
        image_path: Path of the image to describe.

    Returns:
        The model's textual description.
    """
    # NOTE(review): "如何有的维度" in the last prompt line looks like a typo
    # for "如果有的维度" — kept byte-identical here to preserve behavior.
    prompt = """请详细描述这张图片的各个维度，包括但不限于以下内容：
1. **主体内容**：图片中的主要物体或人物是什么？它们的位置和姿态如何？
2. **人物信息**：图片中是否有人？如果有，请描述他们的数量、性别、年龄、穿着、动作和表情。
3. **场景类型**：这是一张什么类型的图片？例如，是生活场景（如家庭聚会、户外活动）、工作场景（如办公室、工厂）、自然风景（如山川、河流）、还是其他类型（如艺术创作、抽象图像）？
4. **图像类别**：这是一张什么类别的图像？例如：
   - **商品图像**：如产品包装、广告宣传图、电商展示图等。
   - **艺术图像**：如绘画、摄影作品、抽象艺术等。
   - **纪实图像**：如新闻照片、街拍、历史记录等。
   - **生活图像**：如家庭照片、旅行照片、日常记录等。
   - **科技图像**：如科学实验图、数据可视化、技术示意图等。
   - **其他类别**：如教育图像（如教材插图）、医疗图像（如X光片）、娱乐图像（如电影海报）等。
5. **细节补充**：图片中是否有其他值得注意的细节？例如，背景中的物体、文字、符号或特殊的光影效果。
6. **整体评价**：用一句话总结这张图片给你的整体印象。

请尽可能详细地描述，尽量涵盖以上所有维度，如何有的维度不适用可以忽略这一维度。"""
    return vl_model.describe_image(image_path, prompt)


# 3. 调用chat_model进行知识增强和格式化输出
def enhance_description(chat_model, description):
    """Have the chat model rewrite *description* into one fluent Chinese passage.

    The prompt asks the model to add relevant background knowledge and caps
    the output at 510 characters.

    Args:
        chat_model: Object exposing ``chat(prompt)``.
        description: Raw multi-dimension description produced by the VL model.

    Returns:
        The enhanced description text.
    """
    request = f"请根据以下图片描述的各个维度，生成一段完整、流畅的中文描述，并补充相关的背景知识和细节,请保证输出内容不要超过510字。图片描述信息如下:\n{description}"
    return chat_model.chat(request)


def extract_metadata_from_image_desc(chat_model, description):
    """Ask the chat model to pick metadata tags for an image description.

    NOTE(review): the prompt tells the model to choose tags "from below",
    but no candidate tag list is actually included — presumably still TODO;
    verify against the caller before relying on the output.

    Args:
        chat_model: Object exposing ``chat(prompt)``.
        description: The image description to extract tags from.

    Returns:
        The model's raw tag output.
    """
    request = f"请根据以下图片描述的各个维度，抽取元数据标签，元数据标签请从下面中进行选择。图片描述如下:\n{description}"
    return chat_model.chat(request)


# 4. 通过chromadb构建向量数据库
def build_vector_db(descriptions, metadata, embedding_function, db_path="vector_db"):
    """Build (or extend) a persistent Chroma vector DB from image descriptions.

    Args:
        descriptions: List of description texts, one per image.
        metadata: List of metadata dicts aligned with *descriptions*.
        embedding_function: Callable mapping a text to its embedding vector.
        db_path: Directory for the persistent Chroma database.
    """
    client = chromadb.PersistentClient(path=db_path)
    collection = client.get_or_create_collection(name="image_descriptions")

    # Nothing to index — the (possibly empty) collection still exists on disk.
    if not descriptions:
        return

    # One batched add instead of a round-trip per document; embeddings are
    # passed as a list of vectors aligned with the documents, and ids keep
    # the original 0..n-1 enumeration scheme.
    collection.add(
        documents=list(descriptions),
        metadatas=list(metadata),
        ids=[str(idx) for idx in range(len(descriptions))],
        embeddings=[embedding_function(desc) for desc in descriptions],
    )


# 5. 基于语义的搜索测试
def semantic_search(query, embedding_function, db_path="vector_db", top_k=5):
    """Embed *query* and return the ``top_k`` nearest documents from the DB.

    Args:
        query: Natural-language search string.
        embedding_function: Callable mapping a text to its embedding vector.
        db_path: Directory of the persistent Chroma database.
        top_k: Number of results to return.

    Returns:
        The Chroma query result mapping (ids, documents, metadatas, ...).
    """
    client = chromadb.PersistentClient(path=db_path)
    collection = client.get_collection(name="image_descriptions")
    return collection.query(
        query_embeddings=embedding_function(query),
        n_results=top_k,
    )


# 6. 基于元标签的搜索测试
def metadata_search(metadata_key, metadata_value, db_path="vector_db", top_k=5):
    """Fetch up to *top_k* records whose metadata field matches exactly.

    Fixes two defects in the previous version:
    * It used the legacy ``Client(Settings(persist_directory=...))``
      constructor while the rest of this file uses ``PersistentClient``,
      so it did not open the same on-disk database.
    * ``collection.query`` requires query embeddings or texts; a pure
      metadata filter must use ``collection.get`` instead.

    Args:
        metadata_key: Metadata field name to filter on.
        metadata_value: Exact value the field must equal.
        db_path: Directory of the persistent Chroma database.
        top_k: Maximum number of records to return.

    Returns:
        The Chroma get result mapping (ids, documents, metadatas, ...).
    """
    client = chromadb.PersistentClient(path=db_path)
    collection = client.get_collection(name="image_descriptions")
    return collection.get(
        where={metadata_key: metadata_value},
        limit=top_k,
    )


# 主函数
def main_vllm():
    """End-to-end pipeline against remote vLLM servers.

    Describes each image with the VL model, enhances the description with
    the chat model, persists both texts to disk, indexes the enhanced
    descriptions in Chroma, then runs a sample semantic search.
    """
    vl_model = Qwen2VLModel(model_name="Qwen/Qwen2-VL-2B-Instruct", base_url="http://10.1.30.3:48000/v1",
                            api_key="Empty")
    chat_model = Qwen2ChatModel(model_name="Qwen/Qwen2.5-7B-Instruct", base_url="http://10.1.30.3:38000/v1",
                                api_key="123456")
    embedding_function = OpenAIEmbeddingFunction(model_name="bge-m3:latest",
                                                 embedding_url="http://127.0.0.1:11434/v1/embeddings")
    # Input images and the directories for the generated text artifacts.
    image_directory = "../examples/rag_data/images"
    image_describe_generated = "../examples/rag_data/generated/image_describe"
    image_summary_generated = "../examples/rag_data/generated/image_summary"
    # Make sure the output directories exist before writing into them.
    os.makedirs(image_describe_generated, exist_ok=True)
    os.makedirs(image_summary_generated, exist_ok=True)

    image_files = get_image_files(image_directory)

    descriptions = []
    metadata = []

    for image_path in image_files:
        # Raw multi-dimension description from the VL model.
        description = describe_image(vl_model, image_path)
        file_name = os.path.basename(image_path)
        # Context managers + explicit utf-8: the previous code leaked file
        # handles and relied on the platform default encoding, which breaks
        # the Chinese text on e.g. Windows (main_local reads with utf-8).
        with open(f"{image_describe_generated}/{file_name}.txt", "w", encoding="utf-8") as f:
            f.write(description)
        # Knowledge-enhanced, reformatted summary from the chat model.
        enhanced_description = enhance_description(chat_model, description)
        with open(f"{image_summary_generated}/{file_name}.txt", "w", encoding="utf-8") as f:
            f.write(enhanced_description)

        # TODO: metadata tag extraction — result is not yet consumed below.
        extracted_metadata = extract_metadata_from_image_desc(chat_model, description)

        # Metadata stored alongside each document in the vector DB.
        meta = {
            "image_path": image_path,
            "description": description,
            # "tags": "image"
        }

        descriptions.append(enhanced_description)
        metadata.append(meta)

    db_path = "./vector_db"
    # Build the vector database from the enhanced descriptions.
    build_vector_db(descriptions, metadata, embedding_function, db_path=db_path)

    # Smoke-test semantic search.
    semantic_results = semantic_search("Intel ARC 770显卡", embedding_function, db_path=db_path)
    print("Semantic Search Results:", semantic_results)

    # Metadata search stays disabled until tag extraction is wired in.
    # metadata_results = metadata_search("tags", "image")
    # print("Metadata Search Results:", metadata_results)


def main_local():
    """End-to-end pipeline using local models / pre-generated files.

    With ``just_build_rag`` set, the description and summary text files
    written by a previous run are reused instead of calling the models
    again; otherwise the full describe -> enhance pipeline runs locally
    (XPU VL model + Ollama chat model).
    """
    embedding_function = OpenAIEmbeddingFunction(model_name="bge-m3:latest",
                                                 embedding_url="http://127.0.0.1:11434/v1/embeddings")
    # Input images and the directories holding generated text artifacts.
    image_directory = "../examples/rag_data/images"
    image_describe_generated = "../examples/rag_data/generated/image_describe"
    image_summary_generated = "../examples/rag_data/generated/image_summary"

    image_files = get_image_files(image_directory)

    descriptions = []
    metadata = []
    # True: only rebuild the vector DB from previously generated text files.
    just_build_rag = True
    if just_build_rag:
        for image_path in image_files:
            file_name = os.path.basename(image_path)
            # Context managers close the handles the previous code leaked.
            with open(f"{image_describe_generated}/{file_name}.txt", "r", encoding='utf-8') as f:
                description = f.read()
            with open(f"{image_summary_generated}/{file_name}.txt", "r", encoding='utf-8') as f:
                enhanced_description = f.read()
            meta = {
                "image_path": image_path,
                "description": description,
                # "tags": "image"
            }

            descriptions.append(enhanced_description)
            metadata.append(meta)
    else:
        vl_model = Qwen2VLChatModel()
        chat_model = Qwen2ChatModel(model_name="qwen2.5:7b-instruct-q4_0", base_url="http://127.0.0.1:11434/v1",
                                    api_key="Empty")
        # Make sure the output directories exist before writing into them.
        os.makedirs(image_describe_generated, exist_ok=True)
        os.makedirs(image_summary_generated, exist_ok=True)
        # Generate description and enhanced summary for every image.
        for image_path in image_files:
            description = describe_image(vl_model, image_path)
            file_name = os.path.basename(image_path)
            # Write with explicit utf-8 so the read branch above (which uses
            # utf-8) works regardless of the platform default encoding.
            with open(f"{image_describe_generated}/{file_name}.txt", "w", encoding='utf-8') as f:
                f.write(description)
            enhanced_description = enhance_description(chat_model, description)
            with open(f"{image_summary_generated}/{file_name}.txt", "w", encoding='utf-8') as f:
                f.write(enhanced_description)

            # TODO: metadata tag extraction — result is not yet consumed below.
            extracted_metadata = extract_metadata_from_image_desc(chat_model, description)

            # Metadata stored alongside each document in the vector DB.
            meta = {
                "image_path": image_path,
                "description": description,
                # "tags": "image"
            }

            descriptions.append(enhanced_description)
            metadata.append(meta)

    db_path = "./vector_db"
    # Build the vector database from the enhanced descriptions.
    build_vector_db(descriptions, metadata, embedding_function, db_path=db_path)

    # Smoke-test semantic search.
    semantic_results = semantic_search("Intel ARC 770显卡", embedding_function, db_path=db_path)
    print("Semantic Search Results:", semantic_results)

    # Metadata search stays disabled until tag extraction is wired in.
    # metadata_results = metadata_search("tags", "image")
    # print("Metadata Search Results:", metadata_results)

# Script entry point: rebuild the vector DB from pre-generated local
# description files (see main_local; main_vllm is the remote-server variant).
if __name__ == "__main__":
    main_local()
