import json
import pdb
import sys

import numpy as np
import torch
import torchaudio
from tqdm import tqdm
from transformers import AutoConfig, AutoModel, AutoProcessor, AutoTokenizer, AutoModelForCausalLM

from configuration_IdealLLM_v2 import IdealLLMConfig
from megatron.core.models.speech.ssl_encoder import S3prlFrontend
from modeling_IdealLLM_v2 import IdealLLMModel

# ---- Command-line configuration -------------------------------------------
# Usage: script.py <model_path> <data_path> <output_path> <cuda_device_index>
model_path = sys.argv[1]    # HuggingFace-style checkpoint directory
data_path = sys.argv[2]     # input JSONL manifest, one utterance per line
output_path = sys.argv[3]   # output JSONL with an added "hypothesis" field
device = f"cuda:{sys.argv[4]}"  # target CUDA device, e.g. "cuda:0"

# Load config/model/processor; bfloat16 + eval() for inference only.
config = IdealLLMConfig.from_pretrained(model_path, trust_remote_code=True)
model = IdealLLMModel.from_pretrained(model_path, config=config)
model = model.bfloat16().to(device).eval()
processor = AutoProcessor.from_pretrained(model_path)

# Language-id lookup used to build the `lid` tensor fed to the model.
MLC_SLM_dict = {"english": 0, "french": 1, "german": 2, "italian": 3, "japanese": 4, "korean": 5, "portuguese": 6, "russian": 7, "spanish": 8, "thai": 9, "vietnamese": 10}


def compare_model_parameters(model1, model2):
    """Return ``{name: (tensor1, tensor2)}`` for parameters whose values differ.

    Both models must expose the same parameters, in the same order and with
    the same names; an ``AssertionError`` is raised otherwise.
    """
    params1 = list(model1.named_parameters())
    params2 = list(model2.named_parameters())
    # zip() would silently truncate if one model has extra parameters,
    # so verify the counts explicitly first.
    assert len(params1) == len(params2), \
        f"parameter count mismatch: {len(params1)} vs {len(params2)}"
    diff_params = {}
    # Walk both parameter lists in lockstep and record any value mismatch.
    for (name1, param1), (name2, param2) in zip(params1, params2):
        assert name1 == name2, f"参数名称不匹配: {name1} vs {name2}"
        if not torch.equal(param1.data, param2.data):
            diff_params[name1] = (param1.data, param2.data)
    return diff_params
#pdb.set_trace()
#language_model = AutoModel.from_pretrained("/apdcephfs/share_976139/users/hongfeixue/model/Qwen3-8B-base")
#diff = compare_model_parameters(model.language_model.model.to('cpu'), language_model.to('cpu'))

def get_suffix_prompt(task):
    """Return the instruction text appended after the audio for ``task``.

    Each task maps to a list of candidate prompts; the first (and currently
    only) entry is always used. Raises ``KeyError`` for an unknown task.
    """
    mapper = {
        "asr": ["转录上述语音内容。"],
        "asr_zh": ["转录上述语音内容。"],
        "asr_en": ["转录上述语音内容。"],
        "asr_zhen": ["转录上述语音内容。"],
        "asr_draft": ["基于给定辅助信息，转录上述语音内容。"],
        "asr_multi": ['按不同发言人，转录上述语音内容。']
    }
    # The original `prompt_list[0 % len(prompt_list)]` always selected
    # index 0 — say so directly.
    return mapper[task][0]
    
def process_conversation(ann):
    """Build a two-turn chat (user + empty assistant) from annotation ``ann``.

    The user turn carries the audio reference, an optional context block,
    and the task-specific instruction. ``ann["text"]`` is cleared in place,
    so the assistant turn is always empty (inference-time template).
    """
    content = [{"type": "audio", "audio_url": ann["pg_npy"]}]
    if 'context_text' in ann:  # and ann['task'] in ["asr_draft", "asr_hotword"]:
        ctx = ann['context_text'].replace("\\n", "\n")
        content.append({"type": "text", "text": "###Context\n" + ctx + "\n###Instruct\n"})
    content.append({"type": "text", "text": get_suffix_prompt(ann["task"])})
    # Clear the reference transcript so the assistant slot is empty.
    ann["text"] = ""
    return [
        {"role": "user", "content": content},
        {"role": "assistant", "content": ann["text"]},
    ]

def conversation_processor(ann):
    """Build a single-turn user conversation (audio + instruction) from ``ann``.

    Unlike ``process_conversation``, this reads the waveform path from
    ``ann["wav"]`` and does not mutate ``ann``.
    """
    content = [
        {"type": "audio", "audio_url": ann["wav"]},
        {"type": "text", "text": get_suffix_prompt(ann["task"])},
    ]
    # The original also read ann['text'] into an unused local; dropped.
    return [{"role": "user", "content": content}]


# ---- Inference loop --------------------------------------------------------
# `with` guarantees the output file is closed/flushed even on error
# (the original leaked the handle).
with open(output_path, 'w') as fout:
    # Read the whole JSONL manifest up front so tqdm knows the total.
    with open(data_path, 'r') as f:
        objs = [json.loads(line) for line in f]

    for obj in tqdm(objs):
        audio, sr = torchaudio.load(obj["wav"])

        # Resample everything to 16 kHz, the rate the frontend expects.
        if sr != 16000:
            resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
            audio = resampler(audio)
            sr = 16000

        # Multichannel audio: keep only the first channel (no downmix).
        if audio.shape[0] >= 2:
            audio = audio[:1]
        audio = audio.squeeze(0)

        # Pad with trailing silence so every clip is at least 1 s long.
        if audio.size(0) < sr:
            sil = torch.zeros(sr - audio.size(0))
            audio = torch.cat((audio, sil), dim=0)

        # Split audio into chunks of at most 30 s each.
        max_audio_len_in_s = 30
        chunk_len = sr * max_audio_len_in_s
        audio_parts = [audio[i:i + chunk_len] for i in range(0, len(audio), chunk_len)]
        audios = [part.numpy() for part in audio_parts]

        # NOTE(review): `conversation` is currently unused — the chat template
        # below is hard-coded instead of built via the processor. The call is
        # kept so a manifest entry missing "wav"/"task" still fails early.
        conversation = conversation_processor(obj)
        # text_input = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        text_input = '<|im_start|>user\nTranscribe the speech to text.<|im_end|>\n<|AUDIO|><|im_start|>assistant\n<think>\n\n</think>\n\n'
        inputs = processor(text=text_input, return_tensors="pt", padding=True)

        # NOTE(review): np.array over the chunk list assumes equal-length
        # chunks; audio longer than 30 s with a short final chunk would make
        # this ragged and fail — confirm inputs are <= 30 s or pad upstream.
        audios = torch.tensor(np.array(audios), dtype=torch.float32)
        # All-False mask with the same (num_chunks, num_samples) shape as
        # `audios` — presumably meaning "no frames masked"; verify against
        # the model's feature_attention_mask convention.
        audios_length = torch.tensor([np.zeros(sample.size(0)) for sample in audios], dtype=torch.bool)
        langid = torch.tensor(MLC_SLM_dict[obj['language'].split('-')[0]], dtype=torch.int32)
        inputs.update({'input_features': audios, 'feature_attention_mask': audios_length, 'lid': langid.unsqueeze(0)})

        # Move everything to the target device; float tensors are cast to
        # bfloat16 to match the model weights.
        for k in inputs:
            if inputs[k].dtype == torch.float:
                inputs[k] = inputs[k].bfloat16()
            inputs[k] = inputs[k].to(device)

        # Greedy decoding. 151645/151643 are presumably the Qwen
        # <|im_end|>/<|endoftext|> token ids — confirm against the tokenizer.
        generate_ids = model.generate(**inputs, do_sample=False, max_new_tokens=200, num_beams=1, min_length=1, temperature=1.0, repetition_penalty=1.0, length_penalty=1.0, eos_token_id=151645, pad_token_id=151643)
        # Strip the prompt tokens; decode only the newly generated part.
        generate_ids = generate_ids[:, inputs.input_ids.size(1):]
        response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        response = response.strip()

        print("hyp:", response)
        print("ref:", obj['text'])
        obj['hypothesis'] = response
        item_str = json.dumps(obj, ensure_ascii=False)
        fout.write(item_str + "\n")