from transformers import AutoConfig, AutoModel, AutoTokenizer, AutoProcessor
import numpy as np
import sys
import pdb
import torch
from tqdm import tqdm
import json
# ---- CLI arguments ----
# argv: <model_path> <data_path> <output_path> <cuda_device_index>
# (The original also hard-coded device = "cuda:1" here, but it was
# immediately overwritten by the argv-based assignment below — removed.)
model_path = sys.argv[1]
data_path = sys.argv[2]
output_path = sys.argv[3]
device = f"cuda:{sys.argv[4]}"

# Load the checkpoint for inference: bfloat16, eval mode.
# trust_remote_code is required because the checkpoint ships custom modeling code.
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(device).bfloat16().eval()

# Pre-computed silence feature matrix used to right-pad short audio features.
# NOTE(review): hard-coded cluster path — consider making this a CLI argument.
sil_matrix = torch.tensor(np.load("/apdcephfs_qy3/share_976139/users/adrenzhou/nlp_workdir/Megatron-LM/scripts/Silence.npy"))

processor = AutoProcessor.from_pretrained(model_path)


# Task name -> list of candidate instruction prompts. Hoisted to module level
# so the dict is built once instead of on every call.
_SUFFIX_PROMPT_MAP = {
    "asr": ["转录上述语音内容。"],
    "asr_zh": ["转录上述语音内容。"],
    "asr_en": ["转录上述语音内容。"],
    "asr_zhen": ["转录上述语音内容。"],
    "asr_draft": ["基于给定辅助信息，转录上述语音内容。"],
    "asr_multi": ['按不同发言人，转录上述语音内容。'],
}


def get_suffix_prompt(task):
    """Return the instruction prompt string for *task*.

    Every task currently has exactly one candidate prompt, so the first
    entry is returned (the original `0 % len(prompt_list)` always
    evaluated to 0). Raises KeyError for an unknown task name.
    """
    return _SUFFIX_PROMPT_MAP[task][0]
    
def process_conversation(ann):
    """Build a two-turn chat conversation for one annotation record.

    The user turn holds the audio reference, optional context text, and the
    task-specific instruction; the assistant turn holds the reference
    transcription from ``ann["text"]``.
    """
    user_content = [{"type": "audio", "audio_url": ann["pg_npy"]}]
    if 'context_text' in ann:
        # Literal "\n" sequences in the JSONL field become real newlines.
        context = ann['context_text'].replace("\\n", "\n")
        user_content.append({"type": "text", "text": "###Context\n" + context + "\n###Instruct\n"})
    user_content.append({"type": "text", "text": get_suffix_prompt(ann["task"])})

    return [
        {"role": "user", "content": user_content},
        {"role": "assistant", "content": ann["text"]},
    ]


# ---- Inference loop ----
PENGUINS_RATE = 50           # feature frames per second
MAX_AUDIO_LEN_IN_S = 30      # maximum chunk length, in seconds
CHUNK = PENGUINS_RATE * MAX_AUDIO_LEN_IN_S  # frames per chunk (1500)

with open(data_path, 'r') as f:
    objs = [json.loads(line) for line in f]

# `with` guarantees the output file is closed/flushed (the original leaked
# the handle opened at module level).
with open(output_path, 'w') as fout:
    for obj in tqdm(objs):
        conversation = process_conversation(obj)
        text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
        # Feature matrix; assumed (batch, feat, time) with time on the last
        # axis — consistent with the sil_matrix slicing below. TODO confirm.
        matrix = torch.tensor(np.load(obj['pg_npy']))
        inputs = processor(text=text, return_tensors="pt")
        inputs['attention_mask'] |= 1  # force every text position to be attended

        # Right-pad audio shorter than one second with silence features.
        if matrix.size(2) < PENGUINS_RATE:
            sil = sil_matrix[:, :, :PENGUINS_RATE - matrix.size(2)].clone()
            matrix = torch.cat((matrix, sil), dim=2)

        # Split along the TIME axis into <=30 s chunks.
        # BUGFIX: the original sliced dim 0 (`matrix[i:i+CHUNK]`) while
        # iterating indices of the last (time) dim, which yielded empty
        # tensors — and a broadcast failure at the copy below — for any
        # audio longer than 30 s.
        audio_parts = [matrix[:, :, i:i + CHUNK] for i in range(0, matrix.shape[-1], CHUNK)]
        final_input_features = []
        final_feature_attention_mask = []
        for part in audio_parts:
            part_len = part.shape[-1]
            feature_attention_mask = torch.zeros((1, CHUNK), dtype=torch.int32)
            feature_attention_mask[:, :part_len] = 1
            # Pad each chunk to the full 30 s window with silence features.
            input_features = sil_matrix[:, :, :CHUNK].clone()
            input_features[:, :, :part_len] = part
            final_input_features.append(input_features)
            final_feature_attention_mask.append(feature_attention_mask)
        inputs['input_features'] = torch.cat(final_input_features, dim=0)
        inputs['feature_attention_mask'] = torch.cat(final_feature_attention_mask, dim=0)

        # Move tensors to the target device, down-casting fp32 to bf16 to
        # match the model's dtype.
        sample = inputs
        for k in sample:
            if sample[k].dtype == torch.float:
                sample[k] = sample[k].bfloat16()
            sample[k] = sample[k].to(device)

        generate_ids = model.generate(**sample, do_sample=False, max_new_tokens=128)
        # Strip the prompt tokens; keep only the newly generated ones.
        generate_ids = generate_ids[:, inputs.input_ids.size(1):]
        response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        response = response.strip()
        ref_response = obj['text']
        print("hyp:", response)
        print("ref:", ref_response)
        obj['hypothesis'] = response
        item_str = json.dumps(obj, ensure_ascii=False)
        fout.write(item_str + "\n")



