from transformers import pipeline
import gradio as gr
from pytube import YouTube

pipe = pipeline(model="kk90ujhun/whisper-small-zh")  # change to "your-username/the-name-you-picked"

def transcribe(audio, url):
    # If a YouTube URL is given, download its first available stream and
    # transcribe that file instead of the microphone recording.
    if url:
        audio = YouTube(url).streams.first().download()
    # The pipeline accepts a file path and returns a dict holding the transcript.
    return pipe(audio)["text"]

iface = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="filepath"),
        gr.Textbox(label="YouTube URL", value="https://www.youtube.com/watch?v=YzGsIavAo_E"),
    ],
    outputs="text",
    title="Whisper Small Chinese",
    description="Realtime demo for Chinese speech recognition using a fine-tuned Whisper small model.",
)

iface.launch()
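
# A minimal sketch of exercising the pipeline outside the Gradio UI, e.g. for a
# quick local check. The file path below is a placeholder, not part of the app:
#
#     result = pipe("path/to/some_audio.wav")
#     print(result["text"])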



# import gradio as gr
# import numpy as np
# from PIL import Image
# import requests
#
# import hopsworks
# import joblib
#
# project = hopsworks.login()
# fs = project.get_feature_store()
#
#
# mr = project.get_model_registry()
# model = mr.get_model("iris_modal", version=1)
# model_dir = model.download()
# model = joblib.load(model_dir + "/iris_model.pkl")
#
#
# def iris(sepal_length, sepal_width, petal_length, petal_width):
#     input_list = []
#     input_list.append(sepal_length)
#     input_list.append(sepal_width)
#     input_list.append(petal_length)
#     input_list.append(petal_width)
#     # 'res' is a list of predictions returned as the label.
#     res = model.predict(np.asarray(input_list).reshape(1, -1))
#     # We add '[0]' to the result of the transformed 'res', because 'res' is a list, and we only want
#     # the first element.
#     flower_url = "https://raw.githubusercontent.com/featurestoreorg/serverless-ml-course/main/src/01-module/assets/" + res[0] + ".png"
#     img = Image.open(requests.get(flower_url, stream=True).raw)
#     return img
#
# demo = gr.Interface(
#     fn=iris,
#     title="Iris Flower Predictive Analytics",
#     description="Experiment with sepal/petal lengths/widths to predict which flower it is.",
#     allow_flagging="never",
#     inputs=[
#         gr.inputs.Number(default=1.0, label="sepal length (cm)"),
#         gr.inputs.Number(default=1.0, label="sepal width (cm)"),
#         gr.inputs.Number(default=1.0, label="petal length (cm)"),
#         gr.inputs.Number(default=1.0, label="petal width (cm)"),
#         ],
#     outputs=gr.Image(type="pil"))
#
# demo.launch(share = True)
#