import os
from fastapi import FastAPI, UploadFile, File, Query
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
import shutil
from typing import List
import uvicorn
import cv2
from PIL import Image
import torch
from transformers import CLIPProcessor, CLIPModel
import chromadb
import json
import ssl
from fastapi import HTTPException, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
import traceback
import tempfile  # 添加这个导入
import asyncio
import subprocess  # 添加这个导入


# 添加环境变量以禁用 SSL 验证（不推荐在生产环境中使用）
# Disable the CA bundles so HTTPS requests skip certificate verification
# (not recommended in production).
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['REQUESTS_CA_BUNDLE'] = ''

# Prefer Hugging Face offline mode: use locally cached model files only.
os.environ['TRANSFORMERS_OFFLINE'] = '1'

app = FastAPI()

# CORS configuration: allow the local Vite dev server to call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level globals holding the lazily loaded CLIP model and processor.
model = None
processor = None


def load_model_and_processor():
    """Populate the global CLIP ``model`` and ``processor``.

    Tries the local cache first (offline friendly); if that fails, falls
    back to downloading with SSL verification disabled (not recommended
    in production). Re-raises when both attempts fail.
    """
    global model, processor

    # Defined before the try-block so the fallback path can still reference
    # it; in the original it was assigned inside the try, so an early failure
    # produced a NameError that masked the real error.
    cache_dir = "./model_cache"
    os.makedirs(cache_dir, exist_ok=True)

    try:
        # Attempt 1: local cache only (recommended).
        model = CLIPModel.from_pretrained(
            "openai/clip-vit-base-patch32",
            cache_dir=cache_dir,
            local_files_only=True  # use local files only
        )
        processor = CLIPProcessor.from_pretrained(
            "openai/clip-vit-base-patch32",
            cache_dir=cache_dir,
            local_files_only=True  # use local files only
        )
    except Exception as e:
        print(f"从本地加载失败，尝试在线下载: {e}")
        try:
            # Attempt 2: download with SSL verification disabled
            # (not recommended in production).
            ssl._create_default_https_context = ssl._create_unverified_context

            model = CLIPModel.from_pretrained(
                "openai/clip-vit-base-patch32",
                cache_dir=cache_dir
            )
            processor = CLIPProcessor.from_pretrained(
                "openai/clip-vit-base-patch32",
                cache_dir=cache_dir
            )
        except Exception as download_error:
            print(f"在线下载失败: {download_error}")
            raise


# 在应用启动时加载模型
# Load the CLIP model once at server startup so the first request
# does not pay the model-loading cost.
@app.on_event("startup")
async def startup_event():
    """Preload the global CLIP model and processor."""
    load_model_and_processor()

# NOTE: a second `app = FastAPI()` (plus a duplicate CORS middleware block)
# used to be created here. Re-binding `app` silently replaced the application
# configured above and discarded its startup handler, so the model was never
# preloaded on the served app. All routes must attach to the single app
# created at the top of the module; the duplicate has been removed.

# 创建必要的目录
os.makedirs("query_images", exist_ok=True)
os.makedirs("extracted_frames", exist_ok=True)
# os.makedirs("static/clips", exist_ok=True)

# 挂载静态文件目录
app.mount("/static", StaticFiles(directory="static"), name="static")

# 添加错误处理
# Catch-all handler: log any unhandled error and return a 500 JSON payload
# that includes the traceback for debugging.
@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
    detail = traceback.format_exc()
    print(f"错误详情: {str(exc)}")
    print(detail)
    payload = {
        "status": "error",
        "message": str(exc),
        "detail": detail,
    }
    return JSONResponse(status_code=500, content=payload)
    
@app.post("/process_video_folder/")
async def process_video_folder(folder_path: str = Query(..., description="视频文件夹路径")):
    """Extract frames from every video in *folder_path* and index them.

    Each video becomes its own ChromaDB collection named after the file's
    stem. Returns a summary of the processed videos, 404 when the folder
    does not exist, or 500 on unexpected errors.
    """
    try:
        if not os.path.exists(folder_path):
            return JSONResponse({
                "status": "error",
                "message": "文件夹路径不存在"
            }, status_code=404)

        video_extensions = ('.mp4', '.avi', '.mov', '.mkv')
        processed_videos = []

        for filename in os.listdir(folder_path):
            if not filename.lower().endswith(video_extensions):
                continue

            stem = os.path.splitext(filename)[0]
            frames_dir = f"extracted_frames/{stem}"

            # Sample frames from the video, then embed them into ChromaDB.
            saved_frames = extract_frames(
                os.path.join(folder_path, filename), frames_dir, frame_interval=30
            )
            process_frames_to_chroma(frames_dir, collection_name=stem)

            processed_videos.append({
                "video_name": filename,
                "total_frames": saved_frames,
                "collection_name": stem,
            })

        return JSONResponse({
            "status": "success",
            "message": "所有视频处理完成",
            "processed_videos": processed_videos
        })
    except Exception as e:
        return JSONResponse({
            "status": "error",
            "message": str(e)
        }, status_code=500)


@app.post("/search_image_all/")
async def search_image_all(query_image: UploadFile = File(...)):
    """Search every indexed video for frames similar to the uploaded image.

    Saves the upload, queries each ChromaDB collection, maps embedding
    distances to a 0-100 similarity score, and returns the top 5 matches.
    Returns a 500 JSON error payload on failure.
    """
    try:
        # Lazily load CLIP in case the startup hook has not run.
        if model is None or processor is None:
            load_model_and_processor()

        # SECURITY: strip any directory components from the client-supplied
        # filename so it cannot escape query_images/ (path traversal).
        safe_name = os.path.basename(query_image.filename or "query.jpg")
        query_path = f"query_images/{safe_name}"
        with open(query_path, "wb") as buffer:
            shutil.copyfileobj(query_image.file, buffer)

        # One ChromaDB collection per processed video.
        chroma_client = chromadb.PersistentClient(path="./chroma_db")
        collections = chroma_client.list_collections()

        all_matches = []
        # Distance bounds for the linear distance -> similarity mapping.
        min_distance = 8.0   # at or below this: 100% similar
        max_distance = 40.0  # at or above this: 0% similar

        for collection in collections:
            # Per-video timestamp metadata written by extract_frames().
            timestamp_file = f"extracted_frames/{collection.name}/frame_timestamps.json"

            if not os.path.exists(timestamp_file):
                print(f"找不到时间戳文件: {timestamp_file}")
                continue

            with open(timestamp_file, "r") as f:
                frame_info = json.load(f)

            results = search_similar_frame(query_path, collection_name=collection.name)

            if results and results['distances'][0]:
                for i, doc in enumerate(results['documents'][0]):
                    # Documents look like "Frame at <n>.0 seconds"; the
                    # second-to-last token is actually the frame index.
                    frame_number = int(float(doc.split()[-2]))
                    frame_key = f"frame_{frame_number}"

                    if frame_key not in frame_info:
                        continue
                    frame_data = frame_info[frame_key]

                    distance = float(results['distances'][0][i])

                    # Linear mapping of distance to a 0-100 similarity.
                    if distance <= min_distance:
                        similarity = 100
                    elif distance >= max_distance:
                        similarity = 0
                    else:
                        similarity = 100 * (max_distance - distance) / (max_distance - min_distance)

                    all_matches.append({
                        "video_name": collection.name,
                        "frame_number": frame_number,
                        "video_time": frame_data["time_str"],
                        "timestamp_seconds": frame_data["timestamp"],
                        "similarity": round(similarity, 2)  # 0-100 scale
                    })

        # Best matches first.
        all_matches.sort(key=lambda x: x['similarity'], reverse=True)

        # Return the five most similar frames across all videos.
        return JSONResponse({
            "status": "success",
            "matches": all_matches[:5]
        })
    except Exception as e:
        print(f"搜索出错: {str(e)}")
        return JSONResponse({
            "status": "error",
            "message": str(e)
        }, status_code=500)


@app.post("/get_video_clip/")
async def get_video_clip(
    video_name: str,
    timestamp: float,
    before_seconds: int = 3,
    after_seconds: int = 3
):
    """Cut a short MP4 clip around *timestamp* from videos/<video_name>.mp4.

    The clip spans [timestamp - before_seconds, timestamp + after_seconds]
    (clamped at 0) and is produced with ffmpeg stream-copy into a temp file
    that is deleted shortly after the response is sent.

    Raises:
        HTTPException 404 when the video file is missing.
        HTTPException 500 on ffmpeg or file errors.
    """
    temp_path = None
    try:
        print(f"请求视频片段: {video_name}, 时间戳: {timestamp}")

        # SECURITY: basename() strips directory components so a malicious
        # video_name cannot escape the videos/ folder (path traversal).
        safe_name = os.path.basename(video_name)
        video_path = os.path.join("videos", f"{safe_name}.mp4")
        if not os.path.exists(video_path):
            print(f"视频文件不存在: {video_path}")
            raise HTTPException(status_code=404, detail="视频文件不存在")

        print(f"视频文件路径: {os.path.abspath(video_path)}")
        print(f"视频文件大小: {os.path.getsize(video_path)} bytes")

        # Clip boundaries: never start before the beginning of the video.
        start_time = max(0, timestamp - before_seconds)
        duration = before_seconds + after_seconds

        print(f"截取时间段: {start_time} - {start_time + duration}")

        # Reserve a temp file name for ffmpeg's output.
        with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
            temp_path = temp_file.name

        print(f"创建临时文件: {temp_path}")

        # Stream-copy both streams (-c copy) to avoid re-encoding.
        cmd = [
            'ffmpeg',
            '-y',  # overwrite existing output
            '-ss', str(start_time),
            '-i', os.path.abspath(video_path),
            '-t', str(duration),
            '-c:v', 'copy',  # copy the video stream
            '-c:a', 'copy',  # copy the audio stream
            temp_path
        ]

        print(f"执行命令: {' '.join(cmd)}")

        try:
            # Capture output as raw bytes (encoding=None) to avoid decode
            # errors from ffmpeg's console output.
            subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                check=True,
                encoding=None
            )
        except subprocess.CalledProcessError as e:
            print(f"FFmpeg 执行失败，返回码: {e.returncode}")
            raise HTTPException(status_code=500, detail="FFmpeg 执行失败")
        except Exception as e:
            print(f"FFmpeg 执行异常: {str(e)}")
            raise HTTPException(status_code=500, detail=f"FFmpeg 执行异常: {str(e)}")

        # Give the filesystem a moment to finish flushing the new file.
        await asyncio.sleep(0.5)

        if not os.path.exists(temp_path):
            raise HTTPException(status_code=500, detail="临时文件未生成")

        file_size = os.path.getsize(temp_path)
        print(f"生成的视频片段大小: {file_size} bytes")

        # A clip under ~1 KB almost certainly means ffmpeg produced garbage.
        if file_size < 1000:
            raise HTTPException(status_code=500, detail=f"生成的视频片段过小: {file_size} bytes")

        # Return the file now; deletion is deferred until after the send.
        response = FileResponse(
            temp_path,
            media_type='video/mp4',
            filename=f"{safe_name}_clip.mp4",
            headers={"Content-Length": str(file_size)}
        )

        # Best-effort cleanup once the response has (probably) been sent.
        async def cleanup():
            await asyncio.sleep(1)  # wait for the transfer to finish
            try:
                if os.path.exists(temp_path):
                    os.unlink(temp_path)
                    print(f"临时文件已删除: {temp_path}")
            except Exception as e:
                print(f"清理临时文件失败: {str(e)}")

        asyncio.create_task(cleanup())

        return response

    except Exception as e:
        print(f"处理视频片段时出错: {str(e)}")
        print("详细错误信息:", traceback.format_exc())
        # On failure, make sure the temp file does not leak.
        if temp_path and os.path.exists(temp_path):
            try:
                os.unlink(temp_path)
                print(f"错误处理时删除临时文件: {temp_path}")
            except OSError:
                # Cleanup is best-effort; never mask the original error.
                pass
        if isinstance(e, HTTPException):
            raise e
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/videos/")
async def list_videos():
    """Return the names of all processed videos (one collection each)."""
    try:
        client = chromadb.PersistentClient(path="./chroma_db")
        names = [{"name": col.name} for col in client.list_collections()]
        return JSONResponse({
            "status": "success",
            "videos": names
        })
    except Exception as e:
        return JSONResponse({
            "status": "error",
            "message": str(e)
        }, status_code=500)


def extract_frames(video_path: str, output_dir: str, frame_interval: int = 30):
    """Sample frames from a video and record each sampled frame's timestamp.

    Every ``frame_interval``-th frame (default 30, roughly one per second
    at 30 fps) is written to ``output_dir`` as ``frame_<index>.jpg``, and a
    ``frame_timestamps.json`` file maps each saved frame to its position
    in the video.

    Returns:
        The number of frames saved.
    """
    os.makedirs(output_dir, exist_ok=True)

    capture = cv2.VideoCapture(video_path)
    timestamps = {}
    frame_count = 0
    saved_count = 0

    while True:
        ok, image = capture.read()
        if not ok:
            break

        if frame_count % frame_interval == 0:
            # Current playback position (OpenCV reports milliseconds).
            position = capture.get(cv2.CAP_PROP_POS_MSEC) / 1000.0

            cv2.imwrite(f"{output_dir}/frame_{frame_count}.jpg", image)

            timestamps[f"frame_{frame_count}"] = {
                "timestamp": position,
                "frame_number": frame_count,
                "time_str": format_timestamp(position),
            }
            saved_count += 1

        frame_count += 1

    capture.release()

    # Persist the per-frame timing metadata next to the images.
    with open(f"{output_dir}/frame_timestamps.json", "w") as f:
        json.dump(timestamps, f)

    print(f"总帧数: {frame_count}, 保存帧数: {saved_count}")
    return saved_count

def format_timestamp(timestamp: float) -> str:
    """Render a duration in seconds as an ``HH:MM:SS`` string."""
    total_seconds = int(timestamp)
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"

def process_frames_to_chroma(frames_dir, collection_name="video_frames"):
    """Embed every extracted frame with CLIP and store it in ChromaDB.

    Reads ``frame_<n>.jpg`` files from *frames_dir*, computes CLIP image
    embeddings, and adds them to the (created-if-missing) collection.
    The document text "Frame at <n> seconds" and id "frame_<n>" encode
    the frame index and are parsed by the search endpoints — do not
    change their format.
    """
    # Reuse the lazily loaded globals instead of reloading the CLIP model
    # on every call (the original re-downloaded/loaded it each time).
    if model is None or processor is None:
        load_model_and_processor()

    # PersistentClient keeps the index on disk across restarts.
    chroma_client = chromadb.PersistentClient(path="./chroma_db")

    # get_or_create replaces the original bare try/except creation.
    collection = chroma_client.get_or_create_collection(name=collection_name)

    for frame_file in sorted(os.listdir(frames_dir)):
        if not frame_file.endswith('.jpg'):
            continue

        # Files are named frame_<frame_number>.jpg; despite the variable
        # name this is the frame index, not a time in seconds.
        timestamp = float(frame_file.split('_')[1].split('.')[0])

        image_path = os.path.join(frames_dir, frame_file)
        # Context manager closes the file handle (the original leaked it).
        with Image.open(image_path) as image:
            inputs = processor(images=image, return_tensors="pt")

        with torch.no_grad():
            image_features = model.get_image_features(**inputs)

        embedding = image_features.numpy().flatten().tolist()
        collection.add(
            embeddings=[embedding],
            documents=[f"Frame at {timestamp} seconds"],
            ids=[f"frame_{timestamp}"]
        )


def search_similar_frame(query_image_path, collection_name="video_frames"):
    """Find the 5 frames in *collection_name* closest to the query image.

    Embeds the query image with CLIP and runs a nearest-neighbor query
    against the persistent ChromaDB collection.

    Returns:
        The ChromaDB query result dict, or None when the collection
        does not exist.
    """
    # Reuse the lazily loaded globals instead of reloading the CLIP model
    # on every search (the original re-loaded it per call).
    if model is None or processor is None:
        load_model_and_processor()

    # Context manager closes the image file (the original leaked it).
    with Image.open(query_image_path) as query_image:
        inputs = processor(images=query_image, return_tensors="pt")

    with torch.no_grad():
        query_features = model.get_image_features(**inputs)

    chroma_client = chromadb.PersistentClient(path="./chroma_db")

    try:
        collection = chroma_client.get_collection(name=collection_name)
    except Exception as e:
        print(f"Collection {collection_name} not found: {str(e)}")
        return None

    results = collection.query(
        query_embeddings=query_features.numpy().flatten().tolist(),
        n_results=5
    )

    return results


# Run the API with uvicorn when this module is executed directly.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)