import torchaudio
import torch
import sys
import pdb
from io import BytesIO
from urllib.request import urlopen
import json
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor, TextIteratorStreamer
from threading import Thread
# Runtime configuration: target device and default checkpoint location.
device = "cuda:2"
model_path = "/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B-Instruct"
# Input JSONL file (one annotation object per line). There is no usable
# default, so require it on the command line instead of hitting a NameError
# at the `open(inp_file)` call further down.
inp_file = None
if len(sys.argv) >= 3:
    model_path = sys.argv[1]
    inp_file = sys.argv[2]
else:
    sys.exit(f"usage: {sys.argv[0]} <model_path> <input_jsonl>")


def transform_segments(segments):
    """Render diarized segments as a speaker-labelled transcript.

    Args:
        segments: list of dicts, each with at least ``'speaker'`` and
            ``'text'`` keys (``'start_time'`` may be present but is unused).

    Returns:
        One ``"SPKnn: text"`` line per segment, joined with newlines and no
        trailing newline. Only the first 99 distinct speakers get numbered
        labels (``SPK01``..``SPK99``, so every label fits the two-digit
        format); any further speakers fall back to the generic ``"SPK"``.
    """
    if not segments:
        return ""
    # Distinct speakers in order of first appearance (dict preserves order).
    ordered = list(dict.fromkeys(seg['speaker'] for seg in segments))
    # Cap at 99 so the %02d label never overflows two digits.
    label_map = {spk: "SPK%02d" % (i + 1) for i, spk in enumerate(ordered[:99])}
    lines = [
        # Speakers beyond the 99th collapse to the generic "SPK" label.
        f"{label_map.get(seg['speaker'], 'SPK')}: {seg['text']}"
        for seg in segments
    ]
    return "\n".join(lines)
def get_suffix_prompt(task):
    """Return the instruction prompt appended after the audio for *task*.

    Args:
        task: task identifier, e.g. ``"asr"``, ``"asr_draft"``, ``"asr_multi"``.

    Returns:
        The (Chinese) instruction string for that task.

    Raises:
        KeyError: if *task* is not one of the known task names.
    """
    # The original one-element-list + `0 % len` indirection always selected
    # the first entry, so a plain string mapping is equivalent.
    mapper = {
        "asr": "转录上述语音内容。",
        "asr_zh": "转录上述语音内容。",
        "asr_en": "转录上述语音内容。",
        "asr_zhen": "转录上述语音内容。",
        "asr_draft": "基于给定辅助信息，转录上述语音内容。",
        "asr_multi": '按不同发言人，转录上述语音内容。',
    }
    return mapper[task]
    
def process_conversation(ann):
    """Build a single-turn chat message for one annotation object.

    ann: dict with ``"wav"`` (audio path/url) and ``"task"``; may also carry
    ``"context_text"``, whose escaped ``"\\n"`` sequences are unescaped and
    wrapped in a ###Context/###Instruct frame before the task prompt.
    Returns a one-element conversation list in chat-template format.
    """
    content = [{"type": "audio", "audio_url": ann["wav"]}]
    if "context_text" in ann:  # previously also gated on task in {asr_draft, asr_hotword}
        ctx = ann["context_text"].replace("\\n", "\n")
        content.append({"type": "text", "text": "###Context\n" + ctx + "\n###Instruct\n"})
    content.append({"type": "text", "text": get_suffix_prompt(ann["task"])})
    return [{"role": "user", "content": content}]
    
# Load the processor (tokenizer + audio feature extractor) and the model;
# weights are cast to bfloat16, pinned to `device`, and put in eval mode.
processor = AutoProcessor.from_pretrained(model_path)
model = Qwen2AudioForConditionalGeneration.from_pretrained(model_path).to(device).bfloat16().eval()
# NOTE(review): dead debugging code below, kept from an earlier experiment
# that replayed a saved forward pass from "inputs_audio.pt" — consider removing.
# pdb.set_trace()
# inputs = torch.load("inputs_audio.pt")

# for k in inputs:
#     # typ = inputs[k].dtype
#     # if typ == torch.float:
#     #     sample[k] = sample[k].bfloat16()
#     inputs[k] = inputs[k].to(device)
# output = model(
#     **inputs
# )
# pdb.set_trace()

# Main evaluation loop: for each JSONL annotation, build the chat prompt,
# load + resample its audio, run greedy generation, and print REF/HYP pairs.
with open(inp_file, 'r') as f:
    for line in f:
        obj = json.loads(line)
        conversation = process_conversation(obj)
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)

        # Collect every audio element referenced by the conversation.
        audios = []
        target_sr = processor.feature_extractor.sampling_rate
        for message in conversation:
            if isinstance(message["content"], list):
                for ele in message["content"]:
                    if ele["type"] == "audio":
                        # Load from a local path with torchaudio; for remote
                        # URLs, wrap with BytesIO(urlopen(url).read()) instead.
                        waveform, sample_rate = torchaudio.load(ele["audio_url"])
                        # Resample when the file's rate differs from the
                        # feature extractor's expected sampling rate.
                        if sample_rate != target_sr:
                            resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)
                            waveform = resampler(waveform)
                        audios.append(waveform.squeeze().numpy())

        inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)
        # BUGFIX: move the whole BatchFeature (input_features, attention_mask,
        # ...) to the model device, not just input_ids — otherwise generate()
        # fails with a CPU/CUDA device mismatch.
        inputs = inputs.to(device)

        generate_ids = model.generate(**inputs, do_sample=False, max_new_tokens=128)
        # Strip the prompt tokens, keeping only the newly generated ones.
        generate_ids = generate_ids[:, inputs.input_ids.size(1):]
        response = processor.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]

        # Reference text: flattened diarized transcript when segment-level
        # annotations exist, otherwise the raw transcript.
        if 'segments' in obj:
            ref_response = transform_segments(obj['segments'])
        else:
            ref_response = obj['text']
        print("REF:", ref_response)
        print()
        print("HYP: ", response)
        print()
        print()
        
# print("预期输出： 无线深宵重播神剧大时代")

# import time


# streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True)
# generation_kwargs = dict(
#     inputs,
#     streamer=streamer,
#     do_sample=False,
#     max_new_tokens=128
# )
# thread = Thread(target=model.generate, kwargs=generation_kwargs)
# thread.start()
# print("start decoding")
# start = time.time()

# generated_text = ""
# for new_text in streamer:
#     end = time.time()
#     print(f"Time: {end - start}")
#     generated_text += new_text
#     print(generated_text)
