import base64
import io
import os
import time
import uuid
from pathlib import Path
from typing import Dict, List, Optional

import faiss
import numpy as np
import requests
import torch
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import FileResponse
from PIL import Image
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer

# -------------------- Initialization --------------------
# Create the FastAPI application; the title shows up in the auto-generated docs.
app = FastAPI(title="多模态RAG服务")
# Load environment variables (e.g. ZHIPUAI_API_KEY) from a local .env file.
load_dotenv()

# Model loading
class MultimodalModels:
    """Container for the text and image encoders, loaded once at startup.

    Attributes:
        text_encoder: SentenceTransformer producing 768-dim text embeddings.
        clip_model: Chinese-CLIP model for 512-dim image embeddings, or None
            when the transformers package is unavailable.
        clip_processor: Pre-processor matching clip_model, or None.
    """

    def __init__(self):
        # Text embedding model (m3e-base); hard-coded local path — TODO: make configurable.
        self.text_encoder = SentenceTransformer(r"D:\ideaSpace\MyPython\models\m3e-base")
        print(f"文本嵌入维度: {self.text_encoder.get_sentence_embedding_dimension()}")  # expected: 768
        # Multimodal model (Chinese-CLIP). Imported lazily so the service can
        # still start in text-only mode when transformers is not installed.
        try:
            from transformers import ChineseCLIPModel, AutoProcessor
            self.clip_model = ChineseCLIPModel.from_pretrained(
                r"D:\ideaSpace\MyPython\models\OFA-Sys\chinese-clip-vit-base-patch16")
            # use_fast=True forces the fast processor implementation.
            self.clip_processor = AutoProcessor.from_pretrained(
                r"D:\ideaSpace\MyPython\models\OFA-Sys\chinese-clip-vit-base-patch16",
                use_fast=True)
            print(f"图像嵌入维度: {self.clip_model.config.projection_dim}")  # expected: 512
        except ImportError:
            print("警告: 未安装CLIP，图片嵌入功能不可用")
            self.clip_model = None
            # Fix: the original left clip_processor undefined on this path,
            # so any later attribute access raised AttributeError.
            self.clip_processor = None

# Single shared model instance, created at import time (slow: loads weights from disk).
models = MultimodalModels()

# FAISS index management
class FAISSIndex:
    """Flat inner-product FAISS index with a parallel metadata list.

    The i-th metadata dict describes the i-th vector in the index, so the two
    must always be appended together (see add_items).
    """

    def __init__(self):
        self.index = None              # created lazily on first add
        self.metadata: List[Dict] = [] # metadata[i] belongs to index row i
        self.dim = 768                 # text-encoder embedding dimension

    def create_index(self):
        """Create the flat inner-product index (no normalization applied)."""
        self.index = faiss.IndexFlatIP(self.dim)

    def add_items(self, embeddings: np.ndarray, metadatas: List[Dict]):
        """Add vectors and their metadata, keeping both sequences aligned.

        Args:
            embeddings: array of shape (n, dim); cast to contiguous float32
                because FAISS rejects float64 input.
            metadatas: exactly n metadata dicts, one per embedding row.
        """
        if self.index is None:
            self.create_index()
        self.index.add(np.ascontiguousarray(embeddings, dtype=np.float32))
        self.metadata.extend(metadatas)

    def search(self, query_embedding: np.ndarray, k: int = 5) -> List[Dict]:
        """Return metadata for the top-k matches of the first query row.

        Fixes two defects in the original:
        - searching before anything was added crashed with AttributeError
          (index is None); now returns an empty list instead;
        - FAISS pads missing results with index -1, and metadata[-1] silently
          returned the *last* stored item; negative indices are now filtered.
        """
        if self.index is None or self.index.ntotal == 0:
            return []
        _, indices = self.index.search(
            np.ascontiguousarray(query_embedding, dtype=np.float32), k)
        return [self.metadata[i] for i in indices[0] if i >= 0]

# Module-level singleton index shared by all request handlers.
faiss_index = FAISSIndex()

# -------------------- Core functionality --------------------
def encode_text(text: str) -> np.ndarray:
    """Embed a single text string; returns an array of shape (1, 768)."""
    vectors = models.text_encoder.encode([text], convert_to_tensor=False)
    return vectors

def encode_image(image_bytes: bytes) -> np.ndarray:
    """Embed raw image bytes with Chinese-CLIP; returns an array of shape (1, 512).

    Raises:
        ValueError: when the CLIP model failed to load at startup.
    """
    if models.clip_model is None:
        raise ValueError("CLIP模型未加载，无法处理图片")
    # Fix: force RGB — PNGs with an alpha channel or grayscale inputs would
    # otherwise reach the processor with an unexpected channel count.
    image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    inputs = models.clip_processor(images=image, return_tensors="pt", padding=True)
    # Inference only: disable autograd so no computation graph is built.
    with torch.no_grad():
        outputs = models.clip_model.get_image_features(**inputs)
    return outputs.detach().numpy()

def store_data(text: str, image_bytes: bytes = None):
    """Store a text item, optionally paired with an image, in the FAISS index.

    The text is embedded to 768 dims. When an image is supplied, its 512-dim
    CLIP embedding is projected to 768 dims and AVERAGED with the text
    embedding, so exactly one (1, 768) row enters the index per item.

    Fixes two defects in the original:
    - np.concatenate along axis 0 produced TWO rows for ONE metadata entry,
      permanently misaligning the index and its metadata list;
    - the random projection matrix was re-drawn on every call, so stored and
      queried vectors were projected inconsistently and similarity scores
      were meaningless.
    """
    # Text embedding, shape (1, 768)
    text_embedding = np.asarray(encode_text(text), dtype=np.float32)

    metadata = {"text": text, "type": "text"}
    if image_bytes:
        metadata.update({
            "type": "multimodal",
            "image_base64": base64.b64encode(image_bytes).decode('utf-8')
        })
        # Image embedding, shape (1, 512)
        image_embedding = np.asarray(encode_image(image_bytes), dtype=np.float32)
        if image_embedding.shape[1] != text_embedding.shape[1]:
            # Fixed-seed projection so every call (store AND query) uses the
            # same matrix. NOTE(review): a learned projection would be more
            # principled than a random one — TODO.
            rng = np.random.default_rng(42)
            projection = rng.standard_normal(
                (image_embedding.shape[1], text_embedding.shape[1])).astype(np.float32)
            image_embedding = image_embedding @ projection
        # Average (not stack) so the result stays a single (1, 768) row.
        combined_embedding = (text_embedding + image_embedding) / 2.0
    else:
        combined_embedding = text_embedding

    # One vector, one metadata entry — alignment preserved.
    faiss_index.add_items(combined_embedding, [metadata])

def hybrid_search(query_text: str = None, query_image: bytes = None, k: int = 3) -> List[Dict]:
    """Search the FAISS index by text, image, or both.

    The query is reduced to a single (1, 768) vector so it matches the rows
    produced by store_data: image embeddings are projected with the SAME
    fixed-seed matrix used at store time, and text+image queries are averaged.

    Fixes two defects in the original:
    - text+image queries concatenated two rows along axis 0, searching with
      two separate queries and silently discarding one result set;
    - pure-image queries passed a 512-dim vector to the 768-dim index, which
      crashes inside FAISS.

    Raises:
        ValueError: when neither query_text nor query_image is provided.
    """
    def _to_index_dim(image_vec: np.ndarray) -> np.ndarray:
        # Deterministic seed must match store_data so store-time and
        # query-time projections agree.
        if image_vec.shape[1] != faiss_index.dim:
            rng = np.random.default_rng(42)
            projection = rng.standard_normal(
                (image_vec.shape[1], faiss_index.dim)).astype(np.float32)
            image_vec = image_vec @ projection
        return image_vec

    if query_text and query_image:
        # Multimodal query: average the two aligned embeddings.
        text_embedding = np.asarray(encode_text(query_text), dtype=np.float32)
        image_embedding = _to_index_dim(
            np.asarray(encode_image(query_image), dtype=np.float32))
        query_embedding = (text_embedding + image_embedding) / 2.0
    elif query_text:
        # Pure text query.
        query_embedding = np.asarray(encode_text(query_text), dtype=np.float32)
    elif query_image:
        # Pure image query, projected into the index dimension.
        query_embedding = _to_index_dim(
            np.asarray(encode_image(query_image), dtype=np.float32))
    else:
        raise ValueError("必须提供查询文本或图片")

    return faiss_index.search(query_embedding, k)

# -------------------- FastAPI端点 --------------------
class StoreRequest(BaseModel):
    """Request body for /store: a text plus an optional base64-encoded image."""
    text: str
    # Fix: `str = None` is the implicit-Optional anti-pattern, rejected by
    # strict type checkers and by pydantic v2 default validation.
    image_base64: Optional[str] = None

@app.post("/store")
async def store_item(request: StoreRequest):
    """Storage endpoint: decode the optional image and persist the item."""
    try:
        raw_image = None
        if request.image_base64:
            raw_image = base64.b64decode(request.image_base64)
        store_data(request.text, raw_image)
    except Exception as exc:
        # Surface any failure as a 500 with the underlying message.
        raise HTTPException(status_code=500, detail=str(exc))
    return {"status": "success"}

@app.post("/upload")
async def upload_file(text: str, file: UploadFile = File(...)):
    """File-upload endpoint: store the text together with the uploaded image."""
    try:
        contents = await file.read()
        store_data(text, contents)
    except Exception as exc:
        # Surface any failure as a 500 with the underlying message.
        raise HTTPException(status_code=500, detail=str(exc))
    return {"status": "success"}

class SearchRequest(BaseModel):
    """Request body for /search: text and/or base64 image query, plus top-k."""
    # Fix: explicit Optional instead of the implicit `str = None` anti-pattern.
    query_text: Optional[str] = None
    query_image_base64: Optional[str] = None
    top_k: int = 3

@app.post("/search")
async def search_items(request: SearchRequest):
    """Hybrid-search endpoint: returns matches, materializing images as temp files."""
    try:
        query_image = base64.b64decode(request.query_image_base64) if request.query_image_base64 else None
        results = hybrid_search(request.query_text, query_image, request.top_k)

        # Write each stored base64 image to a temp file served by /temp_images.
        for item in results:
            if "image_base64" in item:
                img_bytes = base64.b64decode(item["image_base64"])
                # Fix: the original named files int(time.time()).jpg, so several
                # images written within the same second overwrote one another;
                # uuid4 guarantees a unique name per image.
                path = Path("./temp") / f"{uuid.uuid4().hex}.jpg"
                path.parent.mkdir(exist_ok=True)
                path.write_bytes(img_bytes)
                item["image_url"] = f"/temp_images/{path.name}"

        return {"results": results}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/temp_images/{filename}")
async def get_temp_image(filename: str):
    """Serve a temp image previously written by /search.

    Fix: the route template and file path never referenced the `filename`
    parameter, so the endpoint could not resolve any file. The name is also
    reduced to its basename so `../`-style path traversal cannot escape ./temp.
    """
    safe_name = Path(filename).name
    path = Path("./temp") / safe_name
    if not path.exists():
        raise HTTPException(status_code=404)
    return FileResponse(path)

# -------------------- ZhipuAI integration --------------------
def generate_description_zhipu(prompt: str, image_base64: str, api_key: str) -> str:
    """Call GLM-4V to generate a description for a base64-encoded JPEG image.

    Args:
        prompt: instruction text sent alongside the image.
        image_base64: base64 image payload, embedded as a data: URL.
        api_key: ZhipuAI bearer token.

    Returns:
        The generated description text.

    Raises:
        requests.HTTPError: on a non-2xx response from the API.
    """
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    payload = {
        "model": "glm-4v",
        "messages": [{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}
            ]
        }]
    }
    # Fix: requests has no default timeout — a hung remote endpoint would
    # block this worker forever. 60s covers slow vision-model responses.
    response = requests.post("https://open.bigmodel.cn/api/paas/v4/chat/completions",
                             headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()['choices'][0]['message']['content']

def generate_image_zhipu(prompt: str, api_key: str) -> str:
    """Call CogView-3 to generate an image; returns the hosted image URL.

    Args:
        prompt: text description of the desired image.
        api_key: ZhipuAI bearer token.

    Raises:
        requests.HTTPError: on a non-2xx response from the API.
    """
    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    payload = {
        "model": "cogview-3",
        "prompt": prompt,
        "size": "1024x1024",
        "response_format": "url"
    }
    # Fix: bounded timeout so a hung API call cannot block the worker forever.
    response = requests.post("https://open.bigmodel.cn/api/paas/v4/images/generations",
                             headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()["data"][0]["url"]

@app.post("/generate_description")
async def generate_description_endpoint(image_base64: str, prompt: str = "描述这张图片"):
    """Image-description endpoint backed by GLM-4V."""
    try:
        api_key = os.getenv("ZHIPUAI_API_KEY")
        # Fix: fail fast with a clear message instead of sending "Bearer None"
        # upstream and surfacing an opaque provider-side auth error.
        if not api_key:
            raise ValueError("ZHIPUAI_API_KEY 未配置")
        description = generate_description_zhipu(prompt, image_base64, api_key)
        return {"description": description}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/generate_image")
async def generate_image_endpoint(prompt: str):
    """Image-generation endpoint backed by CogView-3."""
    try:
        api_key = os.getenv("ZHIPUAI_API_KEY")
        # Fix: fail fast with a clear message instead of sending "Bearer None".
        if not api_key:
            raise ValueError("ZHIPUAI_API_KEY 未配置")
        image_url = generate_image_zhipu(prompt, api_key)
        return {"image_url": image_url}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# -------------------- Main --------------------
if __name__ == "__main__":
    import uvicorn
    # Seed the index with text-only sample data so searches work out of the box.
    if not faiss_index.index:
        sample_texts = ["一只熊猫", "埃菲尔铁塔", "太空中的宇航员"]
        for text in sample_texts:
            store_data(text)

    uvicorn.run(app, host="0.0.0.0", port=8000)