howlbz committed
Commit 76be4b4
1 Parent(s): 319078d

Upload app.py

Files changed (1)
  1. app.py +75 -0
app.py ADDED
@@ -0,0 +1,75 @@
+ from transformers import pipeline
+ import gradio as gr
+ from pytube import YouTube
+
+ # Load the fine-tuned Whisper checkpoint from the Hugging Face Hub.
+ pipe = pipeline(model="kk90ujhun/whisper-small-zh")  # change to "your-username/the-name-you-picked"
+
+ def transcribe(audio, url):
+     if url:
+         # Download the first available stream of the video and transcribe
+         # that file instead of the microphone recording.
+         audio = YouTube(url).streams.first().download()
+     text = pipe(audio)["text"]
+     return text
+
+ iface = gr.Interface(
+     fn=transcribe,
+     inputs=[
+         gr.Audio(source="microphone", type="filepath"),
+         gr.Textbox(label="give me a URL", value="https://www.youtube.com/watch?v=YzGsIavAo_E"),
+     ],
+     outputs="text",
+     title="Whisper Small Chinese",
+     description="Realtime demo for Chinese speech recognition using a fine-tuned Whisper small model.",
+ )
+
+ iface.launch()
+
+
+
+ # import gradio as gr
+ # import numpy as np
+ # from PIL import Image
+ # import requests
+ #
+ # import hopsworks
+ # import joblib
+ #
+ # project = hopsworks.login()
+ # fs = project.get_feature_store()
+ #
+ # #HwJaWmtvaCzFra3g.89QYueFGuScRnJkiepzG2tiWtKSrqNHCCJrnVie9fwhIMeJxRUpAGAT7mF36MDMv
+ # mr = project.get_model_registry()
+ # model = mr.get_model("iris_modal", version=1)
+ # model_dir = model.download()
+ # model = joblib.load(model_dir + "/iris_model.pkl")
+ #
+ #
+ # def iris(sepal_length, sepal_width, petal_length, petal_width):
+ #     input_list = []
+ #     input_list.append(sepal_length)
+ #     input_list.append(sepal_width)
+ #     input_list.append(petal_length)
+ #     input_list.append(petal_width)
+ #     # 'res' is a list of predictions returned as the label.
+ #     res = model.predict(np.asarray(input_list).reshape(1, -1))
+ #     # We add '[0]' to the result of the transformed 'res', because 'res' is a list, and we only want
+ #     # the first element.
+ #     flower_url = "https://raw.githubusercontent.com/featurestoreorg/serverless-ml-course/main/src/01-module/assets/" + res[0] + ".png"
+ #     img = Image.open(requests.get(flower_url, stream=True).raw)
+ #     return img
+ #
+ # demo = gr.Interface(
+ #     fn=iris,
+ #     title="Iris Flower Predictive Analytics",
+ #     description="Experiment with sepal/petal lengths/widths to predict which flower it is.",
+ #     allow_flagging="never",
+ #     inputs=[
+ #         gr.inputs.Number(default=1.0, label="sepal length (cm)"),
+ #         gr.inputs.Number(default=1.0, label="sepal width (cm)"),
+ #         gr.inputs.Number(default=1.0, label="petal length (cm)"),
+ #         gr.inputs.Number(default=1.0, label="petal width (cm)"),
+ #     ],
+ #     outputs=gr.Image(type="pil"))
+ #
+ # demo.launch(share=True)
+ #
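
For reference, the uploaded pipeline can also be exercised outside the Gradio UI. The lines below are a minimal sketch, assuming a local audio file named sample_zh.wav (a hypothetical path, not part of the commit) and the same kk90ujhun/whisper-small-zh checkpoint:

    from transformers import pipeline

    # Same checkpoint as app.py; any ffmpeg-readable audio path works as input.
    pipe = pipeline(model="kk90ujhun/whisper-small-zh")
    print(pipe("sample_zh.wav")["text"])  # prints the Chinese transcription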