from __future__ import annotations

import base64
import copy
import json
import logging
import math
import os
import subprocess
import sys
import time
import warnings

warnings.filterwarnings("ignore")
from functools import lru_cache
from io import BytesIO
from typing import Optional

import audioread
import av
import librosa
import numpy as np
import requests
import soundfile as sf
import torch
import torchvision
from packaging import version
from PIL import Image
from qwen_vl_utils import process_vision_info
from torchvision import io, transforms
from torchvision.transforms import InterpolationMode
from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
from transformers.generation.logits_process import LogitsProcessor
from torch.multiprocessing import Process, Queue, set_start_method
import shutil

from transformers import modeling_utils

# Some transformers builds ship without ALL_PARALLEL_STYLES (or with it set to
# None); patch in the accepted values so tensor-parallel model loading works.
if getattr(modeling_utils, "ALL_PARALLEL_STYLES", None) is None:
    modeling_utils.ALL_PARALLEL_STYLES = ["tp", "none", "colwise", "rowwise"]

# Target sampling rate (Hz) every loaded waveform is resampled/sliced to.
SAMPLE_RATE = 16000


def _check_if_video_has_audio(video_path):
    """Return True iff the media file at *video_path* has at least one audio stream.

    Fix: the original leaked the av container (never closed), holding the file
    descriptor open for the life of the process.
    """
    container = av.open(video_path)
    try:
        return any(stream.type == "audio" for stream in container.streams)
    finally:
        container.close()


def process_audio_info(conversations: list[dict] | list[list[dict]], use_audio_in_video: bool) -> list[np.ndarray] | None:
    """
    Read and process audio info from one conversation or a batch of conversations.

    Returns a list of mono waveforms at SAMPLE_RATE, one per audio element
    (and one per video element when use_audio_in_video is True), in traversal
    order; returns None if no audio elements were found.

    Support dict keys:

    type = audio
    - audio (or audio_url): np.ndarray, "data:audio...;base64," URI,
      http(s) URL, "file://" URL, or a plain local path
    - audio_start (seconds, default 0.0)
    - audio_end (seconds, default None = until the end)

    type = video (only consumed when use_audio_in_video=True)
    - video (or video_url)
    - video_start
    - video_end
    """
    audios = []
    # Normalize the single-conversation call shape to the batched shape.
    if isinstance(conversations[0], dict):
        conversations = [conversations]
    for conversation in conversations:
        for message in conversation:
            if not isinstance(message["content"], list):
                continue
            for ele in message["content"]:
                if ele["type"] == "audio":
                    if "audio" in ele or "audio_url" in ele:
                        path = ele.get("audio", ele.get("audio_url"))
                        audio_start = ele.get("audio_start", 0.0)
                        audio_end = ele.get("audio_end", None)
                        if isinstance(path, np.ndarray):
                            if path.ndim > 1:
                                raise ValueError("Support only mono audio")
                            # Slice by sample index; assumes the array is
                            # already sampled at SAMPLE_RATE — TODO confirm.
                            audios.append(
                                path[int(SAMPLE_RATE * audio_start): None if audio_end is None else int(
                                    SAMPLE_RATE * audio_end)]
                            )
                            # Already appended — skip the shared librosa.load below.
                            continue
                        elif path.startswith("data:audio"):
                            # Inline base64 data URI -> in-memory buffer.
                            _, base64_data = path.split("base64,", 1)
                            data = BytesIO(base64.b64decode(base64_data))
                        elif path.startswith("http://") or path.startswith("https://"):
                            # Remote URL: let ffmpeg stream and decode it.
                            data = audioread.ffdec.FFmpegAudioFile(path)
                        elif path.startswith("file://"):
                            data = path[len("file://"):]
                        else:
                            data = path
                    else:
                        raise ValueError("Unknown audio {}".format(ele))
                elif use_audio_in_video and ele["type"] == "video":
                    if "video" in ele or "video_url" in ele:
                        path = ele.get("video", ele.get("video_url"))
                        audio_start = ele.get("video_start", 0.0)
                        audio_end = ele.get("video_end", None)
                        assert _check_if_video_has_audio(
                            path
                        ), "Video must has audio track when use_audio_in_video=True"
                        if path.startswith("http://") or path.startswith("https://"):
                            data = audioread.ffdec.FFmpegAudioFile(path)
                        elif path.startswith("file://"):
                            data = path[len("file://"):]
                        else:
                            data = path
                    else:
                        raise ValueError("Unknown video {}".format(ele))
                else:
                    continue
                # Shared decode path for every non-ndarray source: resample to
                # SAMPLE_RATE and trim to [audio_start, audio_end) in seconds.
                audios.append(
                    librosa.load(
                        data,
                        sr=SAMPLE_RATE,
                        offset=audio_start,
                        duration=(audio_end - audio_start) if audio_end is not None else None,
                    )[0]
                )
    if len(audios) == 0:
        audios = None
    return audios


def process_mm_info(conversations, use_audio_in_video, return_video_kwargs=False):
    """Gather all multimodal inputs for the given conversations.

    Returns a tuple starting with the audio list from process_audio_info,
    followed by whatever process_vision_info yields (images, videos, and
    optionally the video kwargs when return_video_kwargs is True).
    """
    audio_inputs = process_audio_info(conversations, use_audio_in_video)
    vision_inputs = process_vision_info(conversations, return_video_kwargs=return_video_kwargs)
    return (audio_inputs, *vision_inputs)


class ModifiedRepetitionPenaltyLogitsProcessor(LogitsProcessor):
    r"""
    [`LogitsProcessor`] applying a windowed, occurrence-count-scaled repetition penalty.

    Step 1 of ``__call__`` inverts a standard repetition penalty over the whole
    context (``where(score >= 0, score * p, score / p)`` is the exact inverse of
    HF's ``where(score < 0, score * p, score / p)``); step 2 re-applies a penalty
    of ``p ** count`` per token, restricted to the tail of each row after the
    last assistant header (at most ``window`` trailing tokens).

    Args:
        penalty (`float`):
            Base repetition penalty. 1.0 means no penalty. See [this
            paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
        window (`int`):
            Maximum number of trailing tokens the count-based penalty inspects.
        processor:
            Tokenizer/processor handle; stored but currently unused.
    """

    def __init__(self, penalty: float, window: int, processor):
        if not isinstance(penalty, float) or not (penalty > 0):
            raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
        self.processor = processor
        # Token ids exempt from the count-based penalty (high-frequency words).
        # A GLM v2/v3 tokenizer variant used e.g.:
        # [31123, 0, 54530, 31155, 0, 54537, 54549, 54552, 31201, 54542, 30941,
        #  54558, 54548, 55586, 54903, 54531, 54571, 54534, 54910, 54736, 54828,
        #  54761, 54550, 54536, 54627, 54568, 54646, 54532, 54555]
        self.white_list = torch.tensor([0])
        self.penalty = penalty
        self.window = window

    def find_last_sublist_tensor_optimized(self, lst, sublist=(151644, 77091, 198)):
        """Return, per row of ``lst``, the start index of the scoring window.

        Locates the last occurrence of ``sublist`` (presumably the Qwen chat
        header "<|im_start|>assistant\\n" — confirm against the tokenizer) and
        returns ``max(end_of_sublist, row_len - window)``. Rows without the
        sublist get ``row_len + 1``, i.e. an empty window.

        Fix: the original used a mutable list as the default argument; the
        default is now an immutable tuple (torch.tensor accepts either).
        """
        sublist = torch.tensor(sublist, device=lst.device)
        sublist_length = sublist.size(0)

        results = []
        for batch in lst:
            # All positions where the sublist's first element occurs.
            first_element_positions = (batch == sublist[0]).nonzero(as_tuple=True)[0]
            found = False
            # Scan candidate positions from the end of the row backwards.
            for pos in first_element_positions.flip(dims=(0,)):
                # The whole sublist must fit within the row.
                if pos.item() + sublist_length <= batch.size(0):
                    if torch.all(batch[pos.item(): pos.item() + sublist_length] == sublist):
                        # Window starts right after the header, but never spans
                        # more than `self.window` trailing tokens.
                        results.append(max(pos.item() + sublist_length, batch.shape[0] - self.window))
                        found = True
                        break
            if not found:
                # Start index past the end -> empty window for this row.
                results.append(batch.shape[0] + 1)
        return results

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        if self.penalty == 1.0:
            return scores

        batch_size = input_ids.shape[0]

        # 1. Invert a standard repetition penalty over the full context.
        # NOTE(review): this assumes an earlier processor applied the standard
        # penalty with the same factor — confirm the generate() configuration.
        score = torch.gather(scores, 1, input_ids)
        score = torch.where(score >= 0, score * self.penalty, score / self.penalty)
        scores.scatter_(1, input_ids, score)

        # 2. Re-apply a count-scaled penalty on each row's windowed tail.
        response_indices = self.find_last_sublist_tensor_optimized(input_ids)

        # Hoisted out of the loop: one device transfer instead of one per row.
        white_list = self.white_list.to(input_ids.device)
        for i in range(batch_size):
            # Windowed tail for this sequence.
            start_idx = response_indices[i]
            seq_input_ids = input_ids[i, start_idx:]

            # Drop whitelisted (penalty-exempt) tokens.
            mask = torch.isin(seq_input_ids, white_list)
            seq_input_ids = seq_input_ids[~mask]

            if seq_input_ids.numel() == 0:
                continue

            # Occurrence count of each tail token becomes the penalty exponent.
            counts = torch.bincount(seq_input_ids.view(-1), minlength=scores.shape[1])
            counts = counts[seq_input_ids.view(-1)].view(seq_input_ids.shape)
            new_penalty = self.penalty ** counts
            new_penalty = new_penalty.to(scores.dtype)

            # Penalize this row's logits in place.
            seq_input_ids = seq_input_ids.unsqueeze(0)  # [1, seq_len]
            score = torch.gather(scores[i:i + 1], 1, seq_input_ids)
            score = torch.where(score < 0, score * new_penalty, score / new_penalty)
            scores[i:i + 1].scatter_(1, seq_input_ids, score)

        return scores


def worker_process(model_name: str, gpu_id: int, data_chunk: tuple, output_queue: Queue, batch_size: int):
    """
    Worker entry point: runs batched ASR decoding independently on one GPU.

    Args:
        model_name: which checkpoint to load (see the if/elif chain below).
        gpu_id: CUDA device index this process binds to.
        data_chunk: (conversations, uttids) pair holding this worker's slice.
        output_queue: multiprocessing queue; receives one (gpu_id, results)
            tuple, where results is a list of "uttid text\\n" lines.
        batch_size: number of conversations decoded per generate() call.

    NOTE(review): an unrecognized model_name leaves `model` unbound and fails
    at model.eval() with NameError — confirm only the listed names are used.
    """
    # Bind this process to its assigned GPU.
    torch.cuda.set_device(gpu_id)
    device = torch.device(f"cuda:{gpu_id}")

    # Load the processor and model inside the worker (required with spawn).
    processor = Qwen2_5OmniProcessor.from_pretrained(
        "/apdcephfs/private_kaixunhuang/data/pretrained_models/Qwen/Qwen2.5-Omni-7B")
    if model_name == "qwen_pretrain":
        model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
            "/apdcephfs/private_kaixunhuang/data/pretrained_models/Qwen/Qwen2.5-Omni-7B",
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16, device_map=f"cuda:{gpu_id}")
    elif model_name == "qwen_ft_v0.50":
        model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
            "/apdcephfs/private_kaixunhuang/workspace/metagron-lm-meeting/exp/qwen2_5omni_thinker_train_v0.5_TP4_PP1_CP1_MBZ8_GBSZ2880_seq1024/hf_model",
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16, device_map=f"cuda:{gpu_id}")
    elif model_name == "qwen_ft_sot_v0.11":
        # model = Qwen2_5OmniForConditionalGeneration.from_pretrained("/apdcephfs/private_kaixunhuang/workspace/metagron-lm-meeting/exp/qwen2_5omni_thinker_sot_v0.22_TP4_PP1_CP1_MBZ8_GBSZ2880_seq1024/hf_model",
        #                                                         attn_implementation="flash_attention_2",
        #                                                         torch_dtype=torch.bfloat16, device_map=f"cuda:{gpu_id}")
        model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
            "/apdcephfs/private_kaixunhuang/workspace/metagron-lm-meeting/exp/qwen2_5omni_thinker_sot_v0.11_TP4_PP1_CP1_MBZ8_GBSZ1440_seq2048/hf_model",
            torch_dtype="auto", device_map=f"cuda:{gpu_id}")
    elif model_name == "qwen_ft_think1.5":
        model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
            "/apdcephfs/private_kaixunhuang/workspace/metagron-lm-meeting/exp/qwen2_5omni_thinker_asr_think_v1.5_analyze_TP4_PP1_CP1_MBZ8_GBSZ1440_seq1800/hf_model",
            torch_dtype="auto", device_map=f"cuda:{gpu_id}")
    model.eval()

    conversations_chunk, uttid_chunk = data_chunk
    results = []

    # Decode the assigned chunk in fixed-size batches.
    for start_idx in range(0, len(conversations_chunk), batch_size):
        end_idx = start_idx + batch_size
        batch_conversations = conversations_chunk[start_idx:end_idx]
        batch_uttids = uttid_chunk[start_idx:end_idx]

        # Build the chat-template text; move the audio tokens from their
        # default position to wherever <|audio_placeholder|> sits in the prompt.
        text = processor.apply_chat_template(batch_conversations, add_generation_prompt=True, tokenize=False)
        text = [t.replace("<|audio_bos|><|AUDIO|><|audio_eos|>", "") for t in text]
        text = [t.replace("<|audio_placeholder|>", "<|audio_bos|><|AUDIO|><|audio_eos|>") for t in text]

        # Load the batch's audio (and any vision) inputs.
        audios, images, videos = process_mm_info(batch_conversations, use_audio_in_video=True)

        # Tokenize/featurize the full multimodal batch.
        inputs = processor(
            text=text,
            audio=audios,
            images=images,
            videos=videos,
            return_tensors="pt",
            padding=True,
            use_audio_in_video=True
        )
        inputs = inputs.to(device).to(model.dtype)

        thinker_max_new_tokens = 892
        # First decoding pass (greedy, mild repetition penalty).
        with torch.no_grad():
            text_ids = model.generate(
                **inputs,
                return_audio=False,
                thinker_do_sample=False,
                thinker_max_new_tokens=thinker_max_new_tokens,
                do_sample=False,
                repetition_penalty=1.01,
                use_audio_in_video=True
            )

        for i in range(text_ids.shape[0]):
            # wavdur is only consulted by the commented-out heuristic below.
            wavdur = audios[i].shape[0] / 16000
            # Count non-filler tokens (151643/151646 presumably pad/special
            # ids — confirm against the Qwen tokenizer).
            filling_token = [151643, 151646]
            mask = torch.ones_like(text_ids[i], dtype=torch.bool)
            for val in filling_token:
                mask &= (text_ids[i] != val)
            token_count = mask.sum().item()
            # if token_count - 20 > wavdur * 15 or token_count > thinker_max_new_tokens:
            if token_count > thinker_max_new_tokens:
                # The first pass hit the token budget (likely degenerated into
                # repetition): retry this one utterance with a stronger,
                # windowed repetition penalty.
                first_decoded_text = processor.batch_decode(
                    text_ids[i:i + 1],
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=False
                )[0].split("\n")[-1]
                rep_penalty = 1.1
                # NOTE(review): the full-batch images/videos are passed for a
                # single-item retry — presumably both are None for pure ASR;
                # verify against process_mm_info's output.
                inputs_ = processor(
                    text=text[i:i + 1],
                    audio=audios[i:i + 1],
                    images=images,
                    videos=videos,
                    return_tensors="pt",
                    padding=True,
                    use_audio_in_video=True
                )
                inputs_ = inputs_.to(device).to(model.dtype)
                with torch.no_grad():
                    text_ids_ = model.generate(
                        **inputs_,
                        return_audio=False,
                        thinker_do_sample=False,
                        thinker_max_new_tokens=thinker_max_new_tokens,
                        use_audio_in_video=True,
                        logits_processor=[ModifiedRepetitionPenaltyLogitsProcessor(rep_penalty, 50, processor)]
                    )
                    second_decoded_text = processor.batch_decode(
                        text_ids_,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=False
                    )[0].split("\n")[-1].split("：")[-1].split(":")[-1]
                    print(f"uttid {batch_uttids[i]} first decode {token_count} tokens : ", first_decoded_text)
                    print(f"uttid {batch_uttids[i]} second decode", second_decoded_text)
                    # NOTE(review): the retry only replaces the row when it is
                    # strictly shorter than the first pass; an equal-length
                    # retry is silently discarded — confirm this is intended.
                    if text_ids_.shape[1] < text_ids.shape[1]:
                        # padding text_ids_ to match text_ids
                        text_ids_ = torch.cat([
                            text_ids_,
                            torch.full((text_ids_.shape[0], text_ids.shape[1] - text_ids_.shape[1]), 151643,
                                       dtype=text_ids_.dtype, device=text_ids_.device)
                        ], dim=1)
                        text_ids[i:i + 1, :] = text_ids_

        # Decode the (possibly patched) batch back into text.
        decoded_text = processor.batch_decode(
            text_ids,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )

        # Format one "uttid text" line per utterance.
        for utt_idx, uttid in enumerate(batch_uttids):
            utt_text = decoded_text[utt_idx]
            # utt_text = utt_text.split("\n")[-1]
            utt_text = utt_text.split("assistant\n")[-1]
            utt_text = utt_text.replace("\n", "")
            results.append(f"{uttid} {utt_text}\n")
        # Progress logging from the first GPU only.
        if gpu_id == 0:
            if start_idx % 10 == 0:
                print(decoded_text[0])
            print(f"processed {start_idx} / {len(conversations_chunk)}")

    # Hand the finished chunk back to the parent process.
    output_queue.put((gpu_id, results))

def do_convert_jsonl_to_conversations_and_uttids(dict_list, model_name, train_set):
    """Build chat-template conversations and a parallel uttid list from jsonl records.

    Each record must carry an utterance id (under "key", "id", or "uttid") and
    a wav path (under "wav", or the first element of "wavs").  The system/user
    prompt pair is chosen from the model name, the test-set name, and the
    record's optional "task" field; an optional "context" field is prepended
    to the user prompt.

    Returns:
        (conversations, uttids) — two lists of equal length, index-aligned.
    """
    conversations_out = []
    uttids_out = []
    for record in dict_list:
        # Utterance id: first of the known key spellings that is present.
        for id_field in ("key", "id", "uttid"):
            if id_field in record:
                uttid = record[id_field]
                break
        else:
            raise Exception(f"uttid not found in data {record}")

        # Audio path: single "wav" entry, or the first of a "wavs" list.
        if "wav" in record:
            wav_path = record["wav"]
        elif "wavs" in record:
            wav_path = record["wavs"][0]
        else:
            raise Exception(f"wav not found in data {record}")

        if model_name == "qwen_pretrain":
            system_prompt = "You are a speech recognition model."
            if "libri" in train_set:
                # English test sets.
                user_prompt = "Transcribe the English audio into text without any punctuation marks.<|audio_placeholder|>"
            else:
                # Chinese test sets.
                user_prompt = "请将这段中文语音转换为纯文本，去掉标点符号。<|audio_placeholder|>"
        else:
            # Fine-tuned models: prompt depends on the record's task tag.
            task = record["task"] if "task" in record else ""
            if "asr_sot" in task:
                system_prompt = "You are a speech recognition model."
                user_prompt = "请将这段语音转换为纯文本，去掉标点符号，在不同说话人的转录前添加说话人标签。<|audio_placeholder|>"
            elif "asr_think" in task:
                system_prompt = "You are a helpful assistant."
                user_prompt = "请分析这段语音的内容，并结合预识别结果分析语音中的专有名词和专业术语。<|audio_placeholder|>"
            elif "s2pinyin" in task:
                system_prompt = "You are a helpful assistant."
                user_prompt = "将这段音频转录为纯文本，其中中文转成拼音，英文保留原样。<|audio_placeholder|>"
            else:
                system_prompt = "You are a speech recognition model."
                user_prompt = "请将这段语音转换为纯文本，去掉标点符号。<|audio_placeholder|>"

        context = record.get("context")
        if context is not None and len(context) > 0:
            # Literal "\n"/"\N" escape sequences become real newlines.
            user_prompt = ("###Context\n"
                           + context.replace("\\n", "\n").replace("\\N", "\n")
                           + "\n###Instruct\n" + user_prompt)

        conversations_out.append([
            {
                "role": "system",
                "content": [
                    {"type": "text", "text": system_prompt},
                ],
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_prompt},
                    {"type": "audio", "audio": wav_path},
                ],
            },
        ])
        uttids_out.append(uttid)
    return conversations_out, uttids_out

from gxl_ai_utils.utils import utils_file
def main():
    """Multi-GPU batch-decoding driver.

    For each configured test set: writes the reference transcripts to an scp
    file, splits the data into n_gpus contiguous chunks, runs one
    worker_process per GPU, gathers and sorts the hypotheses, writes them to
    disk, and computes WER against the references.
    """
    # export CUDA_VISIBLE_DEVICES="0, 1, 2, 3"
    # export CUDA_VISIBLE_DEVICES="2"
    # Spawn (not fork) so each worker can initialize CUDA safely.
    set_start_method('spawn', force=True)

    # batch_size = 3
    batch_size = 4
    n_gpus = 4

    # Select which checkpoint variant to decode with.
    model_name = "qwen_pretrain"
    # model_name = "qwen_ft_v0.50"
    # model_name = "qwen_ft_think1.5"

    # Test-data paths.
    # NOTE(review): this first pair of lists is dead code — both variables are
    # reassigned below before use; presumably kept for quick toggling. Confirm.
    test_data_list = [
        "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_20250429_wenetspeech_test_net.jsonl",
        "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_aishell4.jsonl",
        "/apdcephfs_qy3/share_976139/users/kaixunhuang/buf/test_dir/test_en_gigaspeech.jsonl",
        "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/aishell_test/data.list"
    ]

    # Test-set names (one per path above).
    testset_name_list = [
        "testnet_test",
        "aishell4_test",
        "gigaspeech_test",
        "aishell1_test"
    ]

    test_data_list=[
        "/apdcephfs_qy3/share_976139/users/xuelonggeng/data/asr_test/librispeechh_clean/test_20220407_librispeech_clean_org_fbankhires.jsonl",
        "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20220407_librispeech_other_org_fbankhires_local.jsonl",
        "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20211229_wenetspeech_meeting_org_fbankhires.jsonl",
        "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_android.jsonl",
        "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_ios.jsonl",
        "/teaspeech_ceph/share_976139/users/kaixunhuang/buf/test_dir/test_20241119_aishell2_mic.jsonl"
    ]
    testset_name_list=[
        "librispeech_clean",
        "librispeech_other",
        "wenetspeech_meeting",
        "aishell2_android",
        "aishell2_ios",
        "aishell2_mic"
    ]
    output_root = "/apdcephfs/private_xuelonggeng/workspace/metagron-lm-meeting/decode_result"
    outfile_label = "_format_asr"
    for test_data, train_set in zip(test_data_list, testset_name_list):
        utils_file.logging_info(f"test_data: {test_data}, train_set: {train_set}")
        output_dir = os.path.join(output_root, model_name, train_set)
        os.makedirs(output_dir, exist_ok=True)
        output_file = os.path.join(output_dir, "text" + outfile_label)

        dict_list = utils_file.load_dict_list_from_jsonl(test_data)
        true_text_tmp_path = os.path.join(output_dir, "true_text.scp")
        true_text_dict = {}
        # Extract the reference transcript for every utterance (several key
        # spellings are in use across the jsonl sources).
        for data in dict_list:
            if "key" in data:
                uttid = data["key"]
            elif "id" in data:
                uttid = data["id"]
            elif "uttid" in data:
                uttid = data["uttid"]
            else:
                raise Exception(f"uttid not found in data {data}")
            if "text" in data:
                true_text_dict[uttid] = data["text"]
            elif "transcript" in data:
                true_text_dict[uttid] = data["transcript"]
            elif "transcription" in data:
                true_text_dict[uttid] = data["transcription"]
            elif "txt" in data:
                true_text_dict[uttid] = data["txt"]
            else:
                raise Exception(f"text not found in data {data}")
        utils_file.write_dict_to_scp(true_text_dict, true_text_tmp_path)
        all_conversations, all_uttids = do_convert_jsonl_to_conversations_and_uttids(dict_list, model_name, train_set)
        # Split the data into n_gpus contiguous chunks (ceil division).
        chunk_size = (len(all_conversations) + n_gpus - 1) // n_gpus
        # chunk_size = 1000
        data_chunks = []
        for i in range(n_gpus):
            start_idx = i * chunk_size
            end_idx = min((i + 1) * chunk_size, len(all_conversations))
            conv_chunk = all_conversations[start_idx:end_idx]
            uttid_chunk = all_uttids[start_idx:end_idx]
            data_chunks.append((conv_chunk, uttid_chunk))

        # Launch one worker process per GPU.
        processes = []
        output_queue = Queue()
        start_time = time.time()
        for gpu_id in range(n_gpus):
            p = Process(
                target=worker_process,
                args=(model_name, gpu_id, data_chunks[gpu_id], output_queue, batch_size)
            )
            p.start()
            processes.append(p)

        # Drain results BEFORE join(): a child blocked on a full queue would
        # otherwise deadlock the join.
        results = []
        for _ in range(n_gpus):
            gpu_id, chunk_results = output_queue.get()
            results.extend(chunk_results)
            print(f"GPU {gpu_id} 完成处理，共处理 {len(chunk_results)} 条数据")

        # Wait for all workers to exit.
        for p in processes:
            p.join()

        # Restore a deterministic order (sorted by the uttid string prefix).
        results.sort(key=lambda x: x.split()[0])

        # Write the hypotheses.
        with open(output_file, "w", encoding="utf-8") as f_out:
            for res in results:
                f_out.write(res)

        # Throughput statistics.
        total_time = time.time() - start_time
        total_samples = len(all_conversations)
        print(f"处理完成! 共处理 {total_samples} 条数据，耗时 {total_time:.2f} 秒")
        print(f"平均速度: {total_samples / total_time:.2f} 样本/秒")
        wer_path = os.path.join(output_dir, "wer")
        utils_file.do_compute_wer(true_text_tmp_path, output_file, wer_path)
        os.system(f'tail -n 15 {wer_path}')


# Script entry point: run the multi-GPU decoding pipeline.
if __name__ == "__main__":
    main()
