# main.py
from io import BytesIO
from urllib.request import urlopen
import librosa
from fastapi import FastAPI, HTTPException, UploadFile, Form
from pydantic import BaseModel
from typing import List, Union
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor
import torch
import os
import logging
# Initialize the FastAPI application
app = FastAPI()

# Load the Qwen2-Audio processor and model from a local checkpoint.
# device_map="auto" lets accelerate place/shard the weights across available GPUs.
processor = AutoProcessor.from_pretrained("/models/other_models/qwen2audio7b_instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained(
    "/models/other_models/qwen2audio7b_instruct", device_map="auto"
)

# Logging: INFO-level messages go to ./server.log AND to the console.
# logging.basicConfig installs a StreamHandler on the root logger; because this
# logger propagates to root, attaching a second StreamHandler here (as the code
# originally did) printed every message to the console twice, and that extra
# handler carried no formatter. Only the file handler is attached explicitly;
# console output comes from propagation to the root handler.
logging.basicConfig(level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler("./server.log")
file_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(file_handler)


class ConversationMessage(BaseModel):
    """One chat message: plain text, or a list of typed parts
    (e.g. {"type": "text", ...} / {"type": "audio", ...})."""
    # Speaker role, e.g. "user" / "assistant"
    role: str
    content: Union[str, List[dict]]  # plain text, or a composite list of text/audio part dicts

class ConversationInput(BaseModel):
    """Request body for POST /process_audio."""
    # Text prompt sent to the model alongside the audio
    text: str
    # Audio location: "local:<path>" for a file on the server, otherwise an HTTP(S) URL
    url: str
    start: float  # segment start, in seconds
    end: float    # segment end, in seconds; negative means "until end of audio"

def _trim_segment(audio_data, start_s: float, end_s: float, sampling_rate: int):
    """Cut the [start_s, end_s] window (in seconds) out of a 1-D audio array.

    A negative end_s means "until the end of the audio" — matches the
    `end < 0` convention of ConversationInput.
    """
    start = int(start_s * sampling_rate)
    end = int(end_s * sampling_rate)
    return audio_data[start:] if end < 0 else audio_data[start:end]


@app.post("/process_audio")
async def process_audio(input_data: ConversationInput):
    """Run Qwen2-Audio on one audio segment plus a text prompt.

    The audio is fetched from `input_data.url` ("local:<path>" loads from the
    server's filesystem, anything else is treated as an HTTP(S) URL), trimmed
    to the [start, end] second window, and fed to the model together with
    `input_data.text`.

    Returns:
        {"response": <generated text>}
    Raises:
        HTTPException(400) when the audio cannot be loaded or decoded.
    """
    prompt = input_data.text
    url = input_data.url
    sr = processor.feature_extractor.sampling_rate

    conversation = [{"role": "user", "content": [
        {"type": "audio", "audio_url": url},
        {"type": "text", "text": prompt},
    ]}]
    chat_text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)

    audios = []
    for message in conversation:
        if not isinstance(message["content"], list):
            continue
        for ele in message["content"]:
            if ele["type"] != "audio":
                continue
            source = ele["audio_url"]
            if isinstance(source, str):
                try:
                    if source.startswith("local:"):
                        # "local:" prefix -> read straight from the server's filesystem
                        audio_data, _ = librosa.load(source.replace("local:", ""), sr=sr)
                    else:
                        # Remote URL; bounded timeout so a dead host can't hang the request
                        audio_data, _ = librosa.load(BytesIO(urlopen(source, timeout=30).read()), sr=sr)
                    logger.info("Request: %s", input_data)
                    # Trimming was previously duplicated verbatim in both branches
                    audio_data = _trim_segment(audio_data, input_data.start, input_data.end, sr)
                    logger.info("Trimmed audio length: %d samples", len(audio_data))
                except Exception as e:
                    raise HTTPException(status_code=400, detail=f"Error processing audio from {source}: {str(e)}")
            elif isinstance(source, UploadFile):
                # Uploaded files are used whole (no start/end trimming), as before
                try:
                    audio_data, _ = librosa.load(BytesIO(await source.read()), sr=sr)
                except Exception as e:
                    raise HTTPException(status_code=400, detail=f"Error processing uploaded audio file: {str(e)}")
            else:
                raise HTTPException(status_code=400, detail="Invalid audio file format.")
            audios.append(audio_data)

    # Build model inputs and move EVERY tensor to the model's device.
    # The original moved only input_ids, hard-coded "cuda" (crashes on CPU-only
    # hosts), and BatchFeature attribute assignment does not update the dict
    # expanded by **inputs, so the moved tensor was never the one generate() saw.
    inputs = processor(text=chat_text, audios=audios, return_tensors="pt", padding=True)
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens so only the reply is decoded.
    generate_ids = model.generate(**inputs, max_length=256)
    generate_ids = generate_ids[:, inputs.input_ids.size(1):]
    response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]

    return {"response": response}

# Launch: CUDA_VISIBLE_DEVICES=2,3 uvicorn qwen2audio_api:app --host 0.0.0.0 --port 8012
# NOTE: the module name must match this file's name (header says main.py — use `uvicorn main:app` in that case).
