import torchaudio
import numpy as np
import torch
import argparse
import json
import subprocess
from io import BytesIO
from urllib.request import urlopen
from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor, AutoModel
import pdb
from tqdm import tqdm
import concurrent.futures
import os
import queue
import threading
import copy
import torch.multiprocessing as mp
# Pre-computed "silence" feature matrix used to pad short/ragged audio feature
# tensors up to a fixed length. NOTE(review): hard-coded cluster path — this
# will fail outside that environment; consider making it configurable.
sil_matrix = torch.tensor(np.load("/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/scripts/Silence.npy"))

# Frames per second of the audio feature matrices (used as a rate multiplier
# below) — presumably the frame rate of the "penguins" features; TODO confirm.
PENGUINS_RATE = 50
# Maximum audio chunk length, in seconds, fed to the model in one part.
max_audio_len_in_s = 30
def transform_segments(segments):
    """Render diarized segments as newline-joined "SPKnn: text" lines.

    Speakers are labeled SPK01, SPK02, ... in order of first appearance.
    Only the first 99 distinct speakers get numbered labels (the two-digit
    "%02d"-style label would overflow past 99); any later speaker falls back
    to the bare label "SPK".

    Args:
        segments: iterable of dicts with at least 'speaker' and 'text' keys.

    Returns:
        A single string, one "SPKnn: text" line per segment, no trailing
        newline. Empty input yields "".
    """
    # dict.fromkeys deduplicates while preserving first-appearance order.
    ordered = list(dict.fromkeys(seg['speaker'] for seg in segments))
    ordered = ordered[:99]  # cap at 99 so SPK%02d labels stay two digits
    labels = {spk: f"SPK{i + 1:02d}" for i, spk in enumerate(ordered)}
    lines = [f"{labels.get(seg['speaker'], 'SPK')}: {seg['text']}" for seg in segments]
    return "\n".join(lines)

def consumer(annotation_queue, gpu_id, output_queue, model_path, shared_model, do_sample, temperature, top_p, top_k, max_length):
    """GPU worker loop: load the model onto cuda:<gpu_id>, then keep pulling
    preprocessed batches off `annotation_queue` and running inference on them
    until the producer's None sentinel arrives.

    `shared_model` is accepted for interface compatibility but ignored; each
    worker process loads its own copy of the model from `model_path`.
    """
    device = f"cuda:{gpu_id}"
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(device).bfloat16()
    model.eval()
    processor = AutoProcessor.from_pretrained(model_path)
    # iter(callable, sentinel) keeps calling .get() until it returns None.
    for batch in iter(annotation_queue.get, None):
        process_batch(batch, model, device, output_queue, processor,
                      do_sample, temperature, top_p, top_k, max_length)

def process_batch(batch, model, device, output_queue, processor, do_sample, temperature, top_p, top_k, max_length):
    """Run one inference pass for a batch of (annotation, prompt_text, audio_parts)
    tuples: tokenize the prompt, pad each audio feature chunk to a fixed
    30-second window with the silence matrix, generate, decode, and push the
    annotated results onto `output_queue`.
    """
    anns, texts, audios = zip(*batch)
    # print(anns)

    # NOTE(review): only the FIRST item's text/audio is used below, yet the
    # zip(anns, responses) at the bottom pairs responses against ALL
    # annotations. If batch_size > 1 this silently drops items (zip truncates
    # to the shorter sequence) — confirm the effective batch size is 1 here.
    # anns = anns[0]
    texts = texts[0]
    audios = audios[0]
    inputs = processor(text=texts, return_tensors="pt")
    # Force every attention-mask position to 1 (bitwise OR on the int tensor).
    inputs['attention_mask'] |= 1    
    # inputs = processor(text=texts, audios=[audio for audio_parts in audios for audio in audio_parts], return_tensors="pt", padding=True, sampling_rate=16000)
    # inputs['input_ids'] = inputs['input_ids'].to(device)
    audio_parts = audios

    # Pad each audio chunk to exactly PENGUINS_RATE * max_audio_len_in_s
    # frames: real frames first, then frames taken from the pre-loaded
    # silence matrix; the attention mask marks only the real frames.
    # Assumes each `part` is shaped (1, feature_dim, time) like sil_matrix —
    # TODO confirm against process_annotation's chunking.
    final_input_features = []
    final_feature_attention_mask = []
    for part in audio_parts:
        part_len = part.shape[-1]
        feature_attention_mask = torch.zeros((1, PENGUINS_RATE * max_audio_len_in_s), dtype=torch.int32)
        feature_attention_mask[:,:part_len] = 1
        input_features = sil_matrix[:,:,:PENGUINS_RATE * max_audio_len_in_s].clone()
        input_features[:,:,:part_len] = part
        final_input_features.append(input_features)
        final_feature_attention_mask.append(feature_attention_mask)
    inputs['input_features'] = torch.cat(final_input_features, dim=0)
    inputs['feature_attention_mask'] = torch.cat(final_feature_attention_mask, dim=0)    

    # Cast float tensors to bfloat16 (to match the model) and move everything
    # to the worker's device. `sample` aliases `inputs`, so `inputs` is
    # updated in place as well.
    sample = inputs
    for k in sample:
        typ = sample[k].dtype
        if typ == torch.float:
            sample[k] = sample[k].bfloat16()
        sample[k] = sample[k].to(device)
        
    generate_ids = model.generate(
        **inputs,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        max_new_tokens=max_length,
        repetition_penalty=1.0
    )
    # Strip the prompt tokens so only newly generated tokens are decoded.
    generate_ids = generate_ids[:, inputs.input_ids.size(1):]

    responses = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    # Attach each hypothesis to its annotation and emit it; also print
    # hypothesis vs. reference for quick eyeballing in the logs.
    for ann, response in zip(anns, responses):
        ann["hypothesis"] = response
        print(f"hyp: {response}")
        if 'segments' in ann:
            ref = transform_segments(ann['segments'])
        else:
            ref = ann['text']
        print(f"ref: {ref}")
        output_queue.put(ann)

def main():
    parser = argparse.ArgumentParser(description="Batch inference for Qwen2 Audio Model")
    parser.add_argument('--model_path', type=str, default="/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/pretrained_models/Qwen/Qwen2-Audio-7B-Instruct", help='Path to the model')
    parser.add_argument('--input_data', type=str, required=True, help='Path to the input JSONL file')
    parser.add_argument('--output_data', type=str, required=True, help='Path to the output JSONL file')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for inference')
    parser.add_argument('--temperature', type=float, default=0.0, help='Temperature for sampling')
    parser.add_argument('--do_sample', action='store_true', help='Whether to use sampling; set to True if temperature > 0')
    parser.add_argument('--top_p', type=float, default=1.0, help='Top-p (nucleus) sampling')
    parser.add_argument('--top_k', type=int, default=50, help='Top-k sampling')
    parser.add_argument('--max_length', type=int, default=256, help='Maximum length of the generated output')
    parser.add_argument('--num_gpus', type=int, default=2, help='Number of GPUs available for inference')
    # parser.add_argument('--num_workers', type=int, default=2 * 16, help='Number of worker threads for processing annotations')
    args = parser.parse_args()

    model_path = args.model_path
    input_data_path = args.input_data
    output_data_path = args.output_data
    batch_size = args.batch_size
    temperature = args.temperature
    do_sample = args.do_sample if temperature > 0 else False
    top_p = args.top_p
    top_k = args.top_k
    max_length = args.max_length
    num_gpus = args.num_gpus
    num_workers = 16 * num_gpus

    processor = AutoProcessor.from_pretrained(model_path)

    
    # model = Qwen2AudioForConditionalGeneration.from_pretrained(model_path).cpu().bfloat16()
    # model.share_memory()


    def get_suffix_prompt(task, index):
        mapper = {
            "asr": ["转录上述语音内容。"],
            "asr_zh": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_en": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_zhen": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_draft": ["基于给定辅助信息，转录上述语音内容。"],
            "asr_multi": ['按不同发言人，转录上述语音内容。']
        }
        if "Qwen2-Audio-7B-Instruct" in model_path:
            mapper = {
                "asr": ["逐字转录上述语音内容，直接回答结果，避免输出其他内容。"],
                "asr_zh": ["逐字转录上述语音内容，直接回答结果，避免输出其他内容。"],
                "asr_zhen": ["逐字转录上述语音内容，直接回答结果，避免输出其他内容。"],
                "asr_en": ["Recognize the speech in English without any other comment."],
            }
        elif "Qwen2-Audio" in model_path:
            mapper = {
                "asr": [
                    "Detect the language and recognize the speech: <|zh|>"
                ],
                "asr_en": [
                    "Detect the language and recognize the speech: <|en|>"
                ],
                "asr_zh": [
                    "Detect the language and recognize the speech: <|zh|>"
                ],
                "asr_zhen": [
                    "Detect the language and recognize the speech: <|zh|>"
                ],
                "asr_draft": [
                    "Detect the language and recognize the speech: <|zh|>"
                ]
            }
        
        prompt_list = mapper[task]
        return prompt_list[index % len(prompt_list)]
    def read_input_data(input_path):
        annos = []
        with open(input_path, 'r', encoding='utf-8') as f:
            for line in f:
                annos.append(json.loads(line))
        return annos

    def write_output_data(output_path, data):
        with open(output_path, 'w', encoding='utf-8') as f:
            for item in data:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')

    def load_audio_from_stream(command: str, output_sample_rate: int = 16000):
        pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = pipe.communicate()
        if pipe.returncode != 0:
            raise RuntimeError(f"Error: {stderr.decode('utf-8')}")
        audio_data = BytesIO(stdout)
        waveform, sample_rate = torchaudio.load(audio_data, normalize=True)
        return waveform, sample_rate

    def process_conversation(ann):
        conversation = []
        conversation.append({"type": "audio", "audio_url": ann["pg_npy"]})
        if 'context_text' in ann: # and ann['task'] in ["asr_draft", "asr_hotword"]:
            conversation.append({"type": "text", "text": "###Context\n" +ann['context_text'].replace("\\n", "\n") + "\n###Instruct\n"})
        conversation.append({"type": "text", "text": get_suffix_prompt(ann["task"], 0)})

        conversation = [
            {"role": "user", "content": conversation},
            {"role": "assistant", "content": ann["text"]}
        
        ]
        return conversation
    def process_annotation(ann):
        obj = ann
        conversation = process_conversation(obj)
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        matrix = torch.tensor(np.load(obj['pg_npy']))


        if matrix.size(2) < PENGUINS_RATE:
            sil = sil_matrix[:,:,:PENGUINS_RATE-matrix.size(2)].clone()
            # sil = torch.zeros(50 - matrix.size(2))
            matrix = torch.cat((matrix, sil), dim=2)

        audio_parts = [matrix[i: i+ PENGUINS_RATE * max_audio_len_in_s] for i in range(0, matrix.shape[-1], PENGUINS_RATE * max_audio_len_in_s)]



        return text, audio_parts

    def producer(annotation_queue, annotations, num_workers):
        progress_bar = tqdm(total=len(annotations), desc="Processing Annotations (Producer)")

        def worker(worker_annotations):
            batch = []
            for ann in worker_annotations:
                text_input, audios = process_annotation(ann)
                batch.append((ann, text_input, audios))

                if len(batch) == batch_size:
                    annotation_queue.put(batch)
                    batch = []

                progress_bar.update(1)

            # Put remaining batch if not empty
            if batch:
                annotation_queue.put(batch)

        split_annotations = [annotations[i::num_workers] for i in range(num_workers)]
        threads = []
        for i in range(num_workers):
            t = threading.Thread(target=worker, args=(split_annotations[i],))
            t.start()
            threads.append(t)

        for t in threads:
            t.join()
        for i in range(num_gpus):
            annotation_queue.put(None)
        progress_bar.close()



    def writer(output_queue, output_path):
        outputs = []
        while True:
            item = output_queue.get()
            if item is None:
                break
            outputs.append(item)

        write_output_data(output_path, outputs)

    annotations = read_input_data(input_data_path)
    annotation_queue = mp.Queue(maxsize=32)
    output_queue = mp.Queue()

    producer_thread = threading.Thread(target=producer, args=(annotation_queue, annotations, num_workers))
    producer_thread.start()

    consumer_processes = []
    for gpu_id in range(num_gpus):
        p = mp.Process(target=consumer, args=(annotation_queue, gpu_id, output_queue, model_path, None,  do_sample, temperature, top_p, top_k, max_length))
        p.start()
        consumer_processes.append(p)

    writer_thread = threading.Thread(target=writer, args=(output_queue, output_data_path))
    writer_thread.start()

    producer_thread.join()
    for p in consumer_processes:
        p.join()
    output_queue.put(None)
    writer_thread.join()

if __name__ == "__main__":
    # Use the 'spawn' start method so each consumer process gets a fresh
    # interpreter — CUDA cannot be safely re-initialized in a forked child.
    mp.set_start_method('spawn')
    main()
