import io
import re
from typing import List, Optional

import gradio.processing_utils as processing_utils
import soundfile as sf
import torch
from FlagEmbedding import BGEM3FlagModel, FlagReranker
from gradio_client import utils as client_utils
from pydantic import BaseModel, ConfigDict
from qwen_omni_utils import process_mm_info
from transformers import (
    Qwen2_5OmniForConditionalGeneration,
    Qwen2_5OmniProcessor,
)

__all__ = ["predict", "embedding", "rerank", "format_history", "RerankResult"]


class RerankResult(BaseModel):
    """One reranked document: its text, original position, and relevance score."""

    class Document(BaseModel):
        """Wrapper carrying the raw text of a reranked document."""

        text: str

        model_config = ConfigDict(json_schema_extra={"example": {"text": "doc3"}})

    # The document itself; Optional so responses may omit the text payload.
    document: Optional[Document]
    # Zero-based index of this document in the original input list.
    index: int
    # Reranker score; higher means more relevant to the query.
    relevance_score: float

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "document": {"text": "doc3"},
                "index": 2,
                "relevance_score": 0.00019110432,
            }
        }
    )


# Voice settings: speaker names accepted by Qwen2.5-Omni's speech head.
VOICE_LIST = ["Cherry", "Ethan", "Serena", "Chelsie"]
DEFAULT_VOICE = "Ethan"

# System prompt (Chinese, runtime string — do not translate). Roughly: "You are a
# subway-station customer-service agent with audio/visual perception that can
# produce text and speech; politely decline out-of-scope questions."
default_prompt = """你是地铁站智能客服，能够感知听觉和视觉输入，并能生成文本和语音。超出范围的问题请你委婉拒绝用户。"""

# Initialize model and processor. Loading happens at import time; device_map="auto"
# spreads the weights across available accelerators.
model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-Omni-7B",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    # attn_implementation="flash_attention_2",
)

processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")

# Load embedding model (BGE-M3 dense embeddings, used by embedding()).
embedding_model = BGEM3FlagModel("BAAI/bge-m3", use_fp16=True)


# Cross-encoder reranker used by rerank().
rerank_model = FlagReranker(
    "BAAI/bge-reranker-v2-m3", use_fp16=True
)  # Setting use_fp16 to True speeds up computation with a slight performance degradation


def format_history(history: list, prompt: str = default_prompt, oss_cache=None):
    """Convert a Gradio chat history into Qwen-Omni style chat messages.

    Args:
        history: List of {"role": ..., "content": ...} items; content is either a
            plain string or a (file_path, ...) tuple/list for file uploads.
        prompt: Text prepended to (or substituted for) user file messages.
        oss_cache: Optional mapping used to record seen file paths; may be None.

    Returns:
        A list of message dicts suitable for processor.apply_chat_template.
    """
    messages = []
    for item in history:
        content = item["content"]
        if isinstance(content, str):
            messages.append({"role": item["role"], "content": content})
        elif item["role"] == "user" and isinstance(content, (list, tuple)):
            file_path = content[0]

            # Only record the path when a cache was supplied; the original code
            # indexed unconditionally and raised TypeError for the default None.
            if oss_cache is not None:
                oss_cache[file_path] = file_path

            mime_type = client_utils.get_mimetype(file_path)

            if mime_type and mime_type.startswith("audio"):
                messages.append(
                    {
                        "role": item["role"],
                        "content": [
                            {
                                "type": "text",
                                "text": prompt,
                            },
                            {
                                "type": "audio",
                                "audio": file_path,
                            },
                        ],
                    }
                )
            else:
                # Non-audio uploads are replaced by the bare prompt text.
                messages.append(
                    {
                        "role": item["role"],
                        "content": prompt,
                    }
                )

    return messages


def messages_to_text(messages):
    """Run the Omni model on *messages* (text + media) and return the reply text.

    Builds multimodal inputs via the processor, generates greedily
    (temperature=0.0) with the audio head disabled, and extracts the last
    assistant turn from the decoded transcript.
    """
    text = processor.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
    audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
    inputs = processor(
        text=text,
        audio=audios,
        images=images,
        videos=videos,
        return_tensors="pt",
        padding=True,
        use_audio_in_video=True,
    )
    inputs = inputs.to(model.device).to(model.dtype)

    # Text-only generation: return_audio=False skips speech synthesis.
    text_ids = model.generate(
        **inputs,
        use_audio_in_video=True,
        return_audio=False,
        speaker=DEFAULT_VOICE,
        max_new_tokens=500,
        temperature=0.0,
    )
    decoded = processor.batch_decode(
        text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

    # Keep only the last assistant turn. Guarded fallback: the original indexed
    # [-1] on the match list and raised IndexError when the marker was missing.
    # NOTE(review): with MULTILINE, "$" matches at every line end, so a
    # multi-line reply may be truncated to its first line — confirm intent
    # (the sibling chat() uses DOTALL only).
    matches = re.findall(r"\nassistant\n(.*?)$", decoded, re.DOTALL | re.MULTILINE)
    return matches[-1] if matches else decoded


def chat(messages):
    """Run the Omni model on text-only *messages* and return the reply text.

    Unlike messages_to_text(), no media is processed — only the templated text
    is fed to the model. Sampling uses the model's default settings.
    """
    text = processor.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
    inputs = processor(
        text=text,
        return_tensors="pt",
        padding=True,
    )
    inputs = inputs.to(model.device).to(model.dtype)

    # Text-only generation: return_audio=False skips speech synthesis.
    text_ids = model.generate(
        **inputs,
        use_audio_in_video=True,
        return_audio=False,
        speaker=DEFAULT_VOICE,
        max_new_tokens=500,
    )
    decoded = processor.batch_decode(
        text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

    # Keep only the last assistant turn. Guarded fallback: the original indexed
    # [-1] on the match list and raised IndexError when the marker was missing.
    matches = re.findall(r"\nassistant\n(.*?)$", decoded, re.DOTALL)
    return matches[-1] if matches else decoded


def predict(messages):
    """Generate a spoken reply for *messages*, yielding text then audio.

    Yields two dicts in order:
        {"type": "text",  "data": <assistant reply text>}
        {"type": "audio", "data": <path to a 24 kHz WAV in ./audio_cache/>}
    """
    # Prepend the fixed Qwen system prompt expected by the Omni chat template.
    messages = [
        {
            "role": "system",
            "content": [
                {
                    "type": "text",
                    "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.",
                }
            ],
        }
    ] + messages

    # Prepare inputs for the model.
    text = processor.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=False
    )
    audios, images, videos = process_mm_info(messages, use_audio_in_video=True)
    inputs = processor(
        text=text,
        audio=audios,
        images=images,
        videos=videos,
        return_tensors="pt",
        padding=True,
        use_audio_in_video=True,
    )
    inputs = inputs.to(model.device).to(model.dtype)

    # A single generation pass returns BOTH token ids and the waveform.
    # The original called model.generate a second time just for audio,
    # doubling latency and risking speech that does not match the text
    # already yielded; reuse the audio from this call instead.
    text_ids, audio = model.generate(
        **inputs,
        use_audio_in_video=True,
        return_audio=True,
        speaker=DEFAULT_VOICE,
        max_new_tokens=500,
    )
    decoded = processor.batch_decode(
        text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

    # Keep only the last assistant turn; fall back to the full transcript
    # instead of raising IndexError when the marker is missing.
    # NOTE(review): MULTILINE makes "$" match at every line end, which can
    # truncate multi-line replies to their first line — confirm intent.
    matches = re.findall(r"\nassistant\n(.*?)$", decoded, re.DOTALL | re.MULTILINE)
    yield {"type": "text", "data": matches[-1] if matches else decoded}

    # Serialize the waveform to a WAV file in the cache and yield its path.
    audio_np = audio.reshape(-1).detach().cpu().numpy()
    wav_io = io.BytesIO()
    sf.write(wav_io, audio_np, samplerate=24000, format="WAV")
    wav_io.seek(0)
    wav_bytes = wav_io.getvalue()
    audio_path = processing_utils.save_bytes_to_cache(
        wav_bytes, "audio.wav", cache_dir="./audio_cache/"
    )

    yield {"type": "audio", "data": audio_path}


def embedding(input_text: str) -> List[float]:
    """Return the dense BGE-M3 embedding of *input_text* as a list of floats."""
    encoded = embedding_model.encode(
        [input_text],
        batch_size=8,
        # 8192 is the model maximum; lower it if long inputs are not needed,
        # which speeds up encoding.
        max_length=8192,
    )
    dense_vector = encoded["dense_vecs"][0]
    return dense_vector.tolist()


def rerank(
    documents: List[str],
    query: str,
) -> List[RerankResult]:
    """Score *documents* against *query* and return results sorted by relevance.

    Args:
        documents: Candidate texts to rank.
        query: The query to rank them against.

    Returns:
        RerankResult items sorted by relevance_score, highest first; each keeps
        the document's original index and text.
    """
    if not documents:
        return []

    pairs = [[query, doc] for doc in documents]

    scores = rerank_model.compute_score(pairs)
    # NOTE(review): FlagReranker.compute_score may return a bare float for a
    # single pair — normalize to a list so enumerate() below always works.
    if not isinstance(scores, list):
        scores = [scores]

    results = [
        {
            "index": idx,
            # Wrap in the Document shape. The original passed the raw string,
            # which RerankResult.model_validate rejects because "document"
            # must be {"text": ...} (pydantic nested model).
            "document": {"text": documents[idx]},
            "relevance_score": score,
        }
        for idx, score in enumerate(scores)
    ]

    results.sort(key=lambda item: item["relevance_score"], reverse=True)

    return [RerankResult.model_validate(item) for item in results]