pip install --upgrade pip
# The code below also needs: transformers and accelerate for the Whisper pipeline,
# plus datasets and faiss-cpu for the RAG retriever further down.
pip install --upgrade transformers accelerate datasets faiss-cpu

import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

# Use the GPU in half precision when available, otherwise fall back to CPU / float32.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model_id = "openai/whisper-large-v3"

# Load the Whisper checkpoint and move it to the target device.
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=False, use_safetensors=True
)
model.to(device)

# The processor bundles the feature extractor (audio -> log-mel features) and the tokenizer.
processor = AutoProcessor.from_pretrained(model_id)

# Build an ASR pipeline that transcribes long audio in 30-second chunks with timestamps.
pipe = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    max_new_tokens=128,
    chunk_length_s=30,
    batch_size=16,
    return_timestamps=True,
    torch_dtype=torch_dtype,
    device=device,
)

# Transcribe the audio file; "language" forces the decoding language and should match
# the language actually spoken in the recording.
result = pipe("/content/BryanThe_Ideal_Republic.ogg", generate_kwargs={"language": "french"})
print(result["text"])
print(result["chunks"])
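
# Optional sketch: with return_timestamps=True, each entry in result["chunks"] is a dict
# of the form {"timestamp": (start, end), "text": "..."}, so the segments can be printed
# in a more readable way. This loop is illustrative and not part of the original script.
for chunk in result["chunks"]:
    start, end = chunk["timestamp"]
    print(f"[{start} - {end}] {chunk['text']}")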

from transformers import RagTokenizer, RagRetriever, RagTokenForGeneration

# Load the RAG components. The checkpoint is "facebook/rag-token-nq", so the matching
# RagTokenForGeneration head is used. use_dummy_dataset=True retrieves from a small
# dummy index instead of the full Wikipedia index, which keeps the demo lightweight.
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)
rag_model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

def retrieve_and_generate_response(transcribed_text):
    # Encode the transcription as the query for the RAG question encoder;
    # truncation=True guards against transcripts longer than the encoder's maximum length.
    input_ids = tokenizer(transcribed_text, return_tensors="pt", truncation=True).input_ids

    # Retrieve supporting passages and generate an answer conditioned on them.
    outputs = rag_model.generate(input_ids)
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
    return response


response = retrieve_and_generate_response(result["text"])
print("Response:", response)
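
# Optional end-to-end sketch: the two stages above can be wrapped into one helper so any
# audio file is transcribed and then answered in a single call. The function name and the
# "language" default are illustrative assumptions, not part of the original code.
def transcribe_and_answer(audio_path, language="english"):
    transcription = pipe(audio_path, generate_kwargs={"language": language})
    return retrieve_and_generate_response(transcription["text"])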