from io import BytesIO
from urllib.request import urlopen
import librosa
from qwen_vl_utils import process_vision_info
from transformers import Qwen2_5OmniProcessor, Qwen2_5OmniThinkerForConditionalGeneration

# Load the "thinker" (text-only generation head) variant of Qwen2.5-Omni plus
# its multimodal processor. The thinker skips speech synthesis entirely.
thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B")
processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")

conversations = [
    {
        "role": "system",
        "content": "You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.",
    },
    {
        "role": "user",
        "content": [
            {
                "type": "audio",
                "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3",
            },
        ],
    },
]

# Render the chat template to a prompt string.
# Fix: the original passed the undefined name `conversation` (NameError);
# the list defined above is `conversations`.
text = processor.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False)

# Download the audio clip and decode it at the sampling rate the feature
# extractor expects.
# Fixes: the user message's content list has a single entry, so it is indexed
# with [0] (the original's [1] raised IndexError), and librosa.load returns a
# (waveform, sample_rate) tuple — the processor needs only the waveform, hence
# the trailing [0].
audio_url = conversations[1]["content"][0]["audio_url"]
audios = [
    librosa.load(
        BytesIO(urlopen(audio_url).read()),
        sr=processor.feature_extractor.sampling_rate,
    )[0]
]
images, videos = process_vision_info(conversations)
inputs = processor(text=text, audios=audios, images=images, videos=videos, return_tensors="pt", padding=True)

# Generate.
# Fix: the original assigned `True` or `False` with backticks — a SyntaxError
# in Python 3. Pass a real boolean; this flag controls whether audio tracks
# embedded in video inputs are used (no video here, so either value works).
inputs["use_audio_in_video"] = True
generation = thinker.generate(**inputs, max_new_tokens=2048)
# Slice off the prompt tokens so only the newly generated tokens are decoded.
generate_ids = generation[:, inputs.input_ids.size(1):]

response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
