import torchaudio
import torch
import argparse
import json
import subprocess
from io import BytesIO
from urllib.request import urlopen
from transformers import AutoModel, AutoProcessor
import pdb
from tqdm import tqdm
import concurrent.futures
import os
import queue
import threading
import copy
import torch.multiprocessing as mp
import math
import numpy as np

def transform_segments(segments):
    """Render diarized segments as a multi-speaker transcript string.

    Args:
        segments: list of dicts, each with at least 'speaker' and 'text' keys.

    Returns:
        Newline-joined lines of the form "SPKnn: text", one per segment,
        without a trailing newline. Speakers are numbered in order of first
        appearance; only the first 99 distinct speakers get a unique label
        (the "%02d" field holds two digits), any later speaker falls back to
        the generic label "SPK". Returns "" for an empty segment list.
    """
    # Distinct speakers in order of first appearance.
    seen = []
    for seg in segments:
        if seg['speaker'] not in seen:
            seen.append(seg['speaker'])
    # Cap at 99 distinct speakers: two-digit labels only reach SPK99.
    # (The original comment claimed "truncate to 100", but the slice keeps 99,
    # which is what the "%02d" format supports — the comment was wrong.)
    labels = {spk: "SPK%02d" % (i + 1) for i, spk in enumerate(seen[:99])}
    lines = [f"{labels.get(seg['speaker'], 'SPK')}: {seg['text']}" for seg in segments]
    return "\n".join(lines)

def make_fbank(wav_data):
    """Compute 80-dim Kaldi-style log-mel filterbank features for a 1-D waveform.

    The waveform is scaled by 2**15 before extraction to match Kaldi's 16-bit
    PCM convention; frames are 25 ms with a 10 ms shift at 16 kHz.
    """
    pcm_scaled = wav_data.unsqueeze(0) * (1 << 15)
    return torchaudio.compliance.kaldi.fbank(
        pcm_scaled,
        num_mel_bins=80,
        frame_length=25,
        frame_shift=10,
        sample_frequency=16000,
    )

def pad_audio_to_max(wav_data, max_length_in_s=30, sr=16000):
    """Right-pad a 1-D waveform with zeros to a fixed length.

    The target length is max_length_in_s seconds plus an extra 0.08 s margin
    at sample rate sr. Assumes wav_data is no longer than the target.
    """
    target_len = max_length_in_s * sr + int(0.08 * sr)
    padded = torch.zeros(target_len).to(wav_data.dtype)
    padded[:wav_data.shape[0]] = wav_data
    return padded

def consumer(annotation_queue, gpu_id, output_queue, model_path, do_sample, temperature, top_p, top_k, max_length):
    """GPU worker process: load the model once, then serve batches until shutdown.

    Pulls preprocessed batches off `annotation_queue` and delegates each to
    `process_batch`. A `None` item is the shutdown sentinel (the producer
    enqueues one per GPU). Results flow out through `output_queue`.
    """
    device = f"cuda:{gpu_id}"
    print(f"Start Loading Model: {device}")
    # bfloat16 + eval() for inference; trust_remote_code runs the repo's custom
    # model code — NOTE(review): only safe for a trusted model_path.
    model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(device).bfloat16().eval()
    processor = AutoProcessor.from_pretrained(model_path)
    print(f"End Loading Model: {device}")
    while True:
        batch = annotation_queue.get()
        if batch is None:  # shutdown sentinel
            break

        process_batch(batch, model, device, output_queue, processor, do_sample, temperature, top_p, top_k, max_length)

def process_batch(batch, model, device, output_queue, processor, do_sample, temperature, top_p, top_k, max_length):
    """Run one generation pass over a batch and enqueue annotated results.

    Args:
        batch: list of (ann, text_prompt, audio_parts) tuples, where
            audio_parts is a list of 1-D 16 kHz waveform tensors (<= 30 s each,
            as produced by process_annotation).
        model, processor: HF model and processor loaded in `consumer`.
        device: target device string, e.g. "cuda:0".
        output_queue: each annotation dict gains a "hypothesis" key and is
            put on this queue.
        do_sample, temperature, top_p, top_k, max_length: generation params.
    """
    anns, texts, audio_parts_list = zip(*batch)

    # Constants hoisted out of the loop (they were re-assigned per sample);
    # fbank_frame_rate is now actually used instead of a hard-coded 100,
    # and the dead `fbank_lengths = []` pre-init is gone.
    max_audio_len_in_s = 30
    sr = 16000
    fbank_frame_rate = 100  # fbank frames per second (10 ms frame shift)

    processed_audios = []
    fbank_lengths_list = []
    for audio_parts in audio_parts_list:
        # Pad every chunk to the fixed 30 s window, then extract fbanks.
        padded_audios = [pad_audio_to_max(part, max_audio_len_in_s, sr) for part in audio_parts]
        fbanks = [make_fbank(part) for part in padded_audios]
        # True (pre-padding) frame counts so the model can mask the padding.
        fbank_lengths = [math.ceil(part.shape[0] / sr * fbank_frame_rate) for part in audio_parts]

        processed_audios.append(torch.stack(fbanks).contiguous())
        fbank_lengths_list.append(torch.tensor(fbank_lengths, dtype=torch.int64))

    # Tokenize the prompts with padding so the text batch is rectangular.
    inputs = processor(text=texts, return_tensors="pt", padding=True)
    inputs['input_ids'] = inputs['input_ids'].to(device)
    inputs['attention_mask'] = inputs['attention_mask'].to(device)

    # All audio windows from all samples concatenated along the batch axis.
    inputs['input_features'] = torch.cat(processed_audios, dim=0).to(device).bfloat16()
    inputs['feature_lengths'] = torch.cat(fbank_lengths_list, dim=0).to(device)

    generate_ids = model.generate(
        **inputs,
        do_sample=do_sample,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        max_new_tokens=max_length,
        repetition_penalty=1.0
    )
    # Drop the prompt tokens; keep only the newly generated continuation.
    generate_ids = generate_ids[:, inputs.input_ids.size(1):]

    responses = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)

    for ann, response in zip(anns, responses):
        ann["hypothesis"] = response
        print(f"hyp: {response}")
        if 'segments' in ann:
            ref = transform_segments(ann['segments'])
        else:
            ref = ann['text']
        print(f"ref: {ref}")
        output_queue.put(ann)

def get_suffix_prompt(task, index):
    """Return the instruction prompt for `task`, cycling through the task's
    prompt list by `index`.

    Raises:
        KeyError: if `task` is not a known task name.
    """
    transcribe_prompt = "基于给定辅助信息，转录上述语音内容。"
    multi_speaker_prompt = '按不同发言人，转录上述语音内容。'
    mapper = {
        "asr": [transcribe_prompt],
        "asr_zh": [transcribe_prompt],
        "asr_en": [transcribe_prompt],
        "asr_zhen": [transcribe_prompt],
        "asr_draft": [transcribe_prompt],
        "asr_multi": [multi_speaker_prompt],
    }
    options = mapper[task]
    return options[index % len(options)]

def process_conversation(ann, num_audio_parts):
    """Build the single-turn chat payload for one annotation.

    Emits one audio entry per 30 s chunk (all referencing the same wav url),
    an optional "###Context" text entry when `ann` has 'context_text'
    (literal "\\n" sequences are turned into real newlines), and finally the
    task-specific instruction prompt.
    """
    content = [{"type": "audio", "audio_url": ann["wav"]} for _ in range(num_audio_parts)]
    if 'context_text' in ann:
        context = ann['context_text'].replace("\\n", "\n")
        content.append({"type": "text", "text": "###Context\n" + context + "\n###Instruct\n"})
    content.append({"type": "text", "text": get_suffix_prompt(ann["task"], 0)})
    return [{"role": "user", "content": content}]

def read_input_data(input_path):
    """Read a UTF-8 JSONL file and return its records as a list of dicts."""
    with open(input_path, 'r', encoding='utf-8') as f:
        return [json.loads(line) for line in f]

def write_output_data(output_path, data):
    """Write items as UTF-8 JSONL, one JSON object per line (non-ASCII kept raw)."""
    lines = [json.dumps(item, ensure_ascii=False) + '\n' for item in data]
    with open(output_path, 'w', encoding='utf-8') as f:
        f.writelines(lines)

def load_audio_from_stream(command: str, output_sample_rate: int = 16000):
    """Run a shell command that writes audio bytes to stdout and decode them.

    Returns:
        (waveform, sample_rate) as produced by torchaudio.load on the
        captured stdout bytes.

    Raises:
        RuntimeError: if the command exits non-zero (stderr is included).

    NOTE(review): `output_sample_rate` is accepted but never used — callers
    receive the stream's native rate; resampling happens in process_annotation.
    NOTE(review): `shell=True` executes `command` through the shell — only
    acceptable for trusted manifest entries, never untrusted input.
    """
    pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = pipe.communicate()
    if pipe.returncode != 0:
        raise RuntimeError(f"Error: {stderr.decode('utf-8')}")
    audio_data = BytesIO(stdout)
    waveform, sample_rate = torchaudio.load(audio_data, normalize=True)
    return waveform, sample_rate

def process_annotation(ann, processor):
    """Load one annotation's audio and build its chat prompt.

    Args:
        ann: annotation dict; ann["wav"] is a path/url torchaudio can load,
            or a shell command producing audio on stdout (fallback).
        processor: HF processor providing apply_chat_template.

    Returns:
        (text_input, audio_parts): the chat-templated prompt string and a
        list of mono 16 kHz waveform chunks of at most 30 s each.
    """
    try:
        audio, sr = torchaudio.load(ann["wav"])
    except Exception:
        # Not loadable as a file: treat ann['wav'] as a shell command that
        # streams audio to stdout. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit — narrowed to Exception.)
        audio, sr = load_audio_from_stream(ann['wav'])

    if sr != 16000:
        resampler = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)
        audio = resampler(audio)
        sr = 16000

    if audio.shape[0] >= 2:  # stereo/multichannel -> mono by channel average
        audio = torch.mean(audio, dim=0)
    audio = audio.squeeze(0)
    if audio.size(0) < sr:  # pad audio to at least 1 s
        sil = torch.zeros(sr - audio.size(0))
        audio = torch.cat((audio, sil), dim=0)

    # Chop into fixed 30 s windows; the last chunk may be shorter.
    max_audio_len_in_s = 30
    chunk = sr * max_audio_len_in_s
    audio_parts = [audio[i:i + chunk] for i in range(0, len(audio), chunk)]

    # Prepare text input
    conversation = process_conversation(ann, len(audio_parts))
    text_input = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)

    return text_input, audio_parts

def producer(annotation_queue, annotations, num_workers, processor):
    """Preprocess annotations on CPU threads and feed batches to the GPU queue.

    Spawns `num_workers` threads, each loading/chunking audio and building
    prompts via process_annotation, then enqueuing batches of tuples
    (ann, text_input, audio_parts). Relies on module globals set in main():
    `batch_size` (batch grouping) and `num_gpus` (one None sentinel per
    consumer is enqueued after all threads finish).
    """
    progress_bar = tqdm(total=len(annotations), desc="Processing Annotations (Producer)")

    def worker(worker_annotations):
        # Each thread accumulates its own batch; a partial batch is flushed
        # at the end, so batches may be smaller than batch_size.
        batch = []
        for ann in worker_annotations:
            text_input, audio_parts = process_annotation(ann, processor)
            batch.append((ann, text_input, audio_parts))

            if len(batch) == batch_size:
                annotation_queue.put(batch)
                batch = []

            progress_bar.update(1)

        if batch:
            annotation_queue.put(batch)

    # Round-robin split keeps per-thread workloads roughly balanced.
    split_annotations = [annotations[i::num_workers] for i in range(num_workers)]
    threads = []
    for i in range(num_workers):
        t = threading.Thread(target=worker, args=(split_annotations[i],))
        t.start()
        threads.append(t)

    for t in threads:
        t.join()
    # Shutdown: one sentinel per consumer process.
    for i in range(num_gpus):
        annotation_queue.put(None)
    progress_bar.close()

def writer(output_queue, output_path):
    """Drain output_queue until a None sentinel, then dump everything as JSONL."""
    collected = []
    while True:
        record = output_queue.get()
        if record is None:
            break
        collected.append(record)

    write_output_data(output_path, collected)

def main():
    """Wire up the producer -> consumers -> writer inference pipeline.

    Producer threads preprocess audio on CPU, one consumer process per GPU
    runs generation, and a writer thread collects results to a JSONL file.
    Generation hyper-parameters are published as module globals for the
    producer (batch_size, num_gpus) — NOTE(review): the consumer processes
    receive them as explicit args, not via these globals.
    """
    parser = argparse.ArgumentParser(description="Batch inference for MOE Conformer LLM")
    parser.add_argument('--model_path', type=str, required=True, help='Path to the model')
    parser.add_argument('--input_data', type=str, required=True, help='Path to the input JSONL file')
    parser.add_argument('--output_data', type=str, required=True, help='Path to the output JSONL file')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for inference')
    parser.add_argument('--temperature', type=float, default=0.0, help='Temperature for sampling')
    parser.add_argument('--do_sample', action='store_true', help='Whether to use sampling')
    parser.add_argument('--top_p', type=float, default=1.0, help='Top-p (nucleus) sampling')
    parser.add_argument('--top_k', type=int, default=50, help='Top-k sampling')
    parser.add_argument('--max_length', type=int, default=256, help='Maximum length of the generated output')
    parser.add_argument('--num_gpus', type=int, default=2, help='Number of GPUs available for inference')
    args = parser.parse_args()

    # Globals are read by producer() (batch_size, num_gpus); the rest are
    # passed explicitly to the consumer processes below.
    global batch_size, temperature, do_sample, top_p, top_k, max_length, num_gpus
    batch_size = args.batch_size
    temperature = args.temperature
    do_sample = args.do_sample
    top_p = args.top_p
    top_k = args.top_k
    max_length = args.max_length
    num_gpus = args.num_gpus
    num_workers = 16 * num_gpus

    processor = AutoProcessor.from_pretrained(args.model_path)
    annotations = read_input_data(args.input_data)
    
    # Bounded queue applies backpressure so preprocessing cannot run
    # arbitrarily far ahead of the GPUs.
    annotation_queue = mp.Queue(maxsize=32)
    output_queue = mp.Queue()

    producer_thread = threading.Thread(
        target=producer, 
        args=(annotation_queue, annotations, num_workers, processor)
    )
    producer_thread.start()

    # One consumer process per GPU; each loads its own model copy.
    consumer_processes = []
    for gpu_id in range(num_gpus):
        p = mp.Process(
            target=consumer, 
            args=(annotation_queue, gpu_id, output_queue, args.model_path, 
                 do_sample, temperature, top_p, top_k, max_length)
        )
        p.start()
        consumer_processes.append(p)

    writer_thread = threading.Thread(
        target=writer, 
        args=(output_queue, args.output_data)
    )
    writer_thread.start()

    # Shutdown order: producer finishes (enqueues consumer sentinels),
    # consumers drain and exit, then the writer gets its None sentinel.
    producer_thread.join()
    for p in consumer_processes:
        p.join()
    output_queue.put(None)
    writer_thread.join()

if __name__ == "__main__":
    # 'spawn' gives each consumer process a fresh interpreter, avoiding the
    # known problems of fork-ing a process that will initialize CUDA.
    mp.set_start_method('spawn')
    main()
