from pytube import YouTube
import os
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
import gradio as gr

# Function that downloads the audio of a YouTube video and returns its transcription
def URLToText(URL):

  # URL provided by the user
  yt = YouTube(URL)

  # extract only audio
  video = yt.streams.filter(only_audio=True).first()

  # destination folder for the downloaded audio
  destination = '.'

  # download the file
  out_file = video.download(output_path=destination)

  # rename the downloaded audio to .mp3 (extension change only, no re-encoding;
  # the ASR pipeline decodes the file via ffmpeg regardless of extension)
  base, ext = os.path.splitext(out_file)
  base = base.replace(" ", "")
  new_file = base + '.mp3'
  os.rename(out_file, new_file)

  # Convert the downloaded audio to text with Whisper
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
  torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

  # Whisper "medium" checkpoint from the Hugging Face Hub
  model_id = "openai/whisper-medium"

  model = AutoModelForSpeechSeq2Seq.from_pretrained(
      model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
  )
  model.to(device)

  processor = AutoProcessor.from_pretrained(model_id)

  # Chunked long-form transcription: audio is split into 30-second windows
  # (chunk_length_s) and processed in batches of 16 on the selected device
  pipe = pipeline(
      "automatic-speech-recognition",
      model=model,
      tokenizer=processor.tokenizer,
      feature_extractor=processor.feature_extractor,
      max_new_tokens=128,
      chunk_length_s=30,
      batch_size=16,
      return_timestamps=True,
      torch_dtype=torch_dtype,
      device=device,
  )
  result = pipe(new_file)
  return result["text"]
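
# Note: the model, processor and pipeline above are rebuilt on every call to
# URLToText; for a long-running Gradio app they could be created once at module
# level and reused inside the function.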

# Build the interface and launch it.
gr.Interface(
    fn=URLToText,
    inputs=gr.Textbox(label="Video URL"),
    outputs=gr.Textbox(label="Transcription"),
).launch(share=False)
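
# Minimal sketch of calling the transcriber directly for a quick local test,
# without the web UI (the URL below is a placeholder, not a real example from
# this project):
#
#   transcript = URLToText("https://www.youtube.com/watch?v=<VIDEO_ID>")
#   print(transcript)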