import asyncio
import hashlib
import hmac
import os
import re
from datetime import datetime
from typing import Dict, List, Optional

import ffmpeg
import requests
from fastapi import FastAPI, BackgroundTasks, HTTPException, status
from pydantic import BaseModel
from scenedetect import detect, ContentDetector

from filter_video import FilterVideo
from google_speech import GoogleSpeechTranscriber
from json_parser import extract_json_from_string
from tiktok_downloader import VideoDownloader

class AIRequest(BaseModel):
    # Signed request payload for POST /ai-process.
    timestamp: str  # request timestamp; part of the signature material
    rm: str  # caller-supplied nonce; part of the signature material
    key: str  # sha256 hex signature the server recomputes and compares against
    text: str  # text passed to the transcriber
    type: Optional[str] = None  # request type forwarded to the transcriber
    prompt: Optional[str] = None  # optional prompt forwarded to the transcriber
    from_text: Optional[str] = None  # read by the handler but currently unused
    history: Optional[List[Dict[str, str]]] = None  # chat history forwarded to the transcriber

app = FastAPI()
# Shared secret used to verify /ai-process signatures.
# NOTE(review): hard-coded in source — consider loading from the environment.
SECRET_KEY = "ghijklmnopq"

# Single-job gate: True while any background pipeline is running.
# NOTE(review): plain check-then-set with no lock; safe only with a single
# event loop / single worker — confirm the deployment runs one worker.
status_flag = False

OUTPUT_MP3_DIR = "/home/data/mp3"    # extracted audio tracks
OUTPUT_IMG_DIR = "/home/data/image"  # keyframe images (extraction currently disabled)
DOWNLOAD_DIR = "/home/data/mp4"      # downloaded / source videos

os.makedirs(OUTPUT_MP3_DIR, exist_ok=True)
os.makedirs(OUTPUT_IMG_DIR, exist_ok=True)
os.makedirs(DOWNLOAD_DIR, exist_ok=True)

@app.get("/process-video")
async def process_video(url: str, video_id: str, language: str, background_tasks: BackgroundTasks):
    """Queue a download-and-transcribe job for *url*; one job may run at a time."""
    global status_flag
    if status_flag:
        # A job is already running: report busy, schedule nothing.
        return {"status": False, "message": "任务正在进行"}
    status_flag = True
    background_tasks.add_task(video_pipeline, url, video_id, language)
    return {"status": True, "message": "任务已提交"}

@app.get("/process-local-video")
async def process_local_video(video_path: str, video_id: str, language: str, background_tasks: BackgroundTasks):
    """Queue an extract-and-transcribe job for an already-downloaded video."""
    global status_flag
    if status_flag:
        # A job is already running: report busy, schedule nothing.
        return {"status": False, "message": "任务正在进行"}
    status_flag = True
    background_tasks.add_task(parse_video_pipeline, video_path, video_id, language)
    return {"status": True, "message": "任务已提交"}

@app.get("/process-ai")
async def process_ai(mp3_path: str, video_id: str, language: str, background_tasks: BackgroundTasks):
    """Queue a transcription-only job for an existing MP3 file."""
    global status_flag
    if status_flag:
        # A job is already running: report busy, schedule nothing.
        return {"status": False, "message": "任务正在进行"}
    status_flag = True
    background_tasks.add_task(ai_pipeline, mp3_path, video_id, language)
    return {"status": True, "message": "任务已提交"}

def is_url(path):
    """Return True if *path* looks like an HTTP(S) URL rather than a local path."""
    return path.startswith(("http://", "https://"))

def download_file(video_url, video_id):
    """Download a remote video, reporting each stage to the Java backend.

    Returns the local file path, or None when the download failed (the
    failure is already reported via call_java_api before returning).
    """
    filename = FilterVideo.get_video_filename(video_url)
    call_java_api("DOWNLOADING", {"video_id": video_id})
    downloader = VideoDownloader(save_path=DOWNLOAD_DIR)

    try:
        saved_path = asyncio.run(downloader.download_video(video_url, custom_name=filename))
    except Exception as e:
        print(f"❌ 异步下载异常: {e}")
        call_java_api("ERROR", {"video_id": video_id, "errormsg": str(e)})
        return None

    # Sanity check: any real saved path is at least 10 characters long.
    if not saved_path or len(saved_path) < 10:
        call_java_api("ERROR", {"video_id": video_id, "errormsg": "视频下载失败"})
        return None

    duration = get_audio_duration(saved_path)
    call_java_api("DOWNLOAD", {"video_id": video_id, "video_path": saved_path, "video_time": duration})
    return saved_path

def video_pipeline(url, video_id, language):
    """Full pipeline for a remote URL or server-local path: download (if URL),
    extract audio, then transcribe.  Runs as a FastAPI background task.

    Any failure is reported to the Java backend as "ERROR"; previously an
    uncaught exception here died silently and the flag reset was the only
    thing that still happened.
    """
    try:
        if is_url(url):
            video_path = download_file(url, video_id)
        else:
            # Local file: strip the web prefix and resolve inside DOWNLOAD_DIR.
            video_path = os.path.join(DOWNLOAD_DIR, url.replace('/profile/mp4', ''))
            video_time = get_audio_duration(video_path)
            call_java_api("DOWNLOAD", {"video_id": video_id, "video_time": video_time})

        # download_file returns None on failure; the length check mirrors the
        # original sanity check for an implausibly short path.
        if not video_path or len(video_path) <= 5:
            return
        audio_path, keyframes = extract_audio_and_frames(video_path, video_id)
        if not audio_path or len(audio_path) <= 5:
            return
        ai_pipeline(audio_path, video_id, language)
    except Exception as e:
        # Background tasks have no caller to propagate to — report upstream
        # instead of failing silently.
        print(f"❌ 处理视频异常: {e}")
        call_java_api("ERROR", {"video_id": video_id, "errormsg": str(e)})
    finally:
        global status_flag
        status_flag = False

def parse_video_pipeline(video_path, video_id, language):
    """Pipeline for an already-downloaded video: extract audio, then transcribe.

    Failures are reported to the Java backend as "ERROR"; previously an
    exception from extraction or transcription vanished silently.
    """
    try:
        audio_path, keyframes = extract_audio_and_frames(video_path, video_id)
        ai_pipeline(audio_path, video_id, language)
    except Exception as e:
        print(f"❌ 处理视频异常: {e}")
        call_java_api("ERROR", {"video_id": video_id, "errormsg": str(e)})
    finally:
        global status_flag
        status_flag = False

def extract_audio_and_frames(video_path, video_id):
    """Extract the audio track of a video as MP3 and notify the backend.

    Keyframe extraction is currently disabled and always yields [].
    Returns (audio_path, keyframes).
    """
    audio_path = os.path.join(OUTPUT_MP3_DIR, f"{video_id}.mp3")
    stream = ffmpeg.input(video_path)
    stream = stream.output(audio_path, format="mp3", acodec="libmp3lame")
    stream.overwrite_output().run()
    keyframes = []  # switchable to extract_keyframes(video_path, video_id)
    call_java_api("EXTRACT", {"video_id": video_id, "mp3": audio_path, "frames": keyframes})
    return audio_path, keyframes

def ai_pipeline(audio_path, video_id, language):
    """Transcribe the extracted audio with Google Speech and report "END".

    Failures are reported as "ERROR" so the Java side is never left waiting
    for an "END" that will not come; previously exceptions vanished silently.
    """
    try:
        transcriber = GoogleSpeechTranscriber("zeta-courage-452114-u1")
        result = transcriber.transcribe_audio(audio_path, language)
        call_java_api("END", {"video_id": video_id, "ai_data": result})
    except Exception as e:
        print(f"❌ AI 处理异常: {e}")
        call_java_api("ERROR", {"video_id": video_id, "errormsg": str(e)})
    finally:
        global status_flag
        status_flag = False

def call_java_api(status, data):
    """Best-effort status callback to the Java backend; never raises.

    Args:
        status: pipeline stage name, e.g. "DOWNLOADING", "EXTRACT", "END", "ERROR".
        data:   JSON-serializable payload for that stage.
    """
    print(f"状态: {status}")
    print(f"data: {data}")
    java_api_url = "http://serverapp:8080/video-analysis/public/fghijklmnop/change-status"
    try:
        # requests has no default timeout; without one a stuck backend would
        # hang the background task forever.
        requests.post(java_api_url, json={"status": status, "data": data}, timeout=10)
    except Exception as e:
        print(f"调用 Java 失败: {e}")

@app.post("/ai-process")
async def ai_process(request: AIRequest):
    """Signature-checked AI text endpoint.

    The client signs a request by sha256-hashing the sorted concatenation of
    (timestamp, rm, SECRET_KEY); the hex digest must match `request.key`.

    Raises:
        HTTPException(404) when the signature does not match.
    """
    timestamp = request.timestamp
    rm = request.rm
    key = request.key
    text = request.text
    req_type = request.type  # renamed local so the builtin `type` isn't shadowed
    prompt = request.prompt
    from_text = request.from_text
    history = request.history

    # Recompute the signature exactly the way the client builds it.
    data_list = [str(timestamp), rm, SECRET_KEY]
    data_list.sort()
    combined = ''.join(data_list)
    computed_signature = hashlib.sha256(combined.encode()).hexdigest()
    print(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] : 比较签名")

    # Constant-time comparison, and the error detail no longer echoes the
    # computed signature — returning it handed any caller the exact valid
    # key for their own request (credential disclosure).
    if not hmac.compare_digest(computed_signature, key):
        raise HTTPException(
            # 404 kept for client compatibility; 401/403 would be more standard.
            status_code=status.HTTP_404_NOT_FOUND,
            detail={
                "error": "Signature verification failed",
                "message": "无效的验证密钥",
            }
        )
    transcriber = GoogleSpeechTranscriber("zeta-courage-452114-u1")
    result = transcriber.transcribe(text, prompt, req_type, history)
    print(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] : 调用结束")
    return {"status": 200, "message": result}

def get_audio_duration(audio_path):
    """Return the media file's duration in seconds, read via ffprobe."""
    info = ffmpeg.probe(audio_path)
    return float(info["format"]["duration"])

if __name__ == "__main__":
    import uvicorn
    # reload=True is ignored (uvicorn warns) when the app is passed as an
    # object instead of an import string, so don't request it here.
    uvicorn.run(app, host="0.0.0.0", port=8000)
