awacke1 committed on
Commit
a37f5db
1 Parent(s): 46208d0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import deque
2
+
3
+ import streamlit as st
4
+ import torch
5
+ from streamlit_player import st_player
6
+ from transformers import AutoModelForCTC, Wav2Vec2Processor
7
+
8
+ from streaming import ffmpeg_stream
9
+
10
# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Options forwarded to streamlit-player's embedded player widget.
player_options = {
    "events": ["onProgress"],  # subscribe to playback-progress callbacks
    "progress_interval": 200,  # emit onProgress every 200 ms
    "volume": 1.0,
    "playing": True,  # autoplay on load
    "loop": False,
    "controls": False,
    "muted": False,
    "config": {"youtube": {"playerVars": {"start": 1}}},  # start playback 1 s in
}

# disable rapid fading in and out on `st.code` updates
st.markdown("<style>.element-container{opacity:1 !important}</style>", unsafe_allow_html=True)
24
+
25
# Cache the heavyweight model load across Streamlit reruns; torch Parameters
# are unhashable for st.cache, so they are excluded from the cache key.
@st.cache(hash_funcs={torch.nn.parameter.Parameter: lambda _: None})
def load_model(model_path="facebook/wav2vec2-large-robust-ft-swbd-300h"):
    """Download (or reuse) the Wav2Vec2 processor and CTC model.

    Returns a ``(processor, model)`` pair with the model moved to ``device``.
    """
    return (
        Wav2Vec2Processor.from_pretrained(model_path),
        AutoModelForCTC.from_pretrained(model_path).to(device),
    )


processor, model = load_model()
33
+
34
+
35
def stream_text(url, chunk_duration_ms, pad_duration_ms):
    """Yield incrementally transcribed text for the audio stream of ``url``.

    Audio is consumed in padded chunks via ``ffmpeg_stream``; the logit frames
    that correspond to the padding are trimmed so chunk boundaries do not emit
    duplicate tokens. The last (possibly partial) word of each chunk is held
    back and prepended to the next chunk's transcription so words split across
    chunk boundaries are recognized whole.

    Args:
        url: media URL understood by ``ffmpeg_stream`` (e.g. a YouTube link).
        chunk_duration_ms: duration of each audio chunk, in milliseconds.
        pad_duration_ms: padding added on each side of a chunk, in milliseconds.

    Yields:
        str: the next piece of recognized text (may be empty).
    """
    sampling_rate = processor.feature_extractor.sampling_rate

    # calculate the length of logits to cut from the sides of the output to account for input padding
    output_pad_len = model._get_feat_extract_output_lengths(int(sampling_rate * pad_duration_ms / 1000))

    # define the audio chunk generator
    stream = ffmpeg_stream(url, sampling_rate, chunk_duration_ms=chunk_duration_ms, pad_duration_ms=pad_duration_ms)

    leftover_text = ""
    for i, chunk in enumerate(stream):
        input_values = processor(chunk, sampling_rate=sampling_rate, return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values.to(device)).logits[0]
        if i > 0:
            logits = logits[output_pad_len : len(logits) - output_pad_len]
        else:  # don't count padding at the start of the clip
            logits = logits[: len(logits) - output_pad_len]

        predicted_ids = torch.argmax(logits, dim=-1).cpu().tolist()
        if processor.decode(predicted_ids).strip():
            leftover_ids = processor.tokenizer.encode(leftover_text)
            # concat the last word (or its part) from the last frame with the current text
            text = processor.decode(leftover_ids + predicted_ids)
            # don't return the last word in case it's just partially recognized
            # BUG FIX: the original `text.rsplit(" ", 1)` unpacking raised ValueError
            # whenever the decoded text was a single word (no space); hold that lone
            # word back as leftover instead of crashing the stream.
            if " " in text:
                text, leftover_text = text.rsplit(" ", 1)
            else:
                text, leftover_text = "", text
            yield text
        else:
            yield leftover_text
            leftover_text = ""

    yield leftover_text
68
+
69
+
70
def main():
    """Streamlit entry point: embed a YouTube player and stream ASR subtitles beneath it."""
    state = st.session_state
    st.header("YouTube Streaming ASR with Robust Wav2Vec2")

    # Collect the URL and chunking parameters; the form batches widget changes
    # into a single rerun when Submit is pressed.
    with st.form(key="inputs_form"):
        state.youtube_url = st.text_input("YouTube URL", "https://www.youtube.com/watch?v=yJmiZ1Mo1cQ")
        state.chunk_duration_ms = st.slider("Audio chunk duration (ms)", 2000, 10000, 3000, 100)
        state.pad_duration_ms = st.slider("Padding duration (ms)", 100, 5000, 1000, 100)
        submit_button = st.form_submit_button(label="Submit")

    # (Re)initialize the ASR generator on first run or whenever the form is submitted.
    if submit_button or "asr_stream" not in state:
        # a hack to update the video player on value changes: encode the slider
        # values into the URL fragment so the player widget sees a new URL.
        state.youtube_url = (
            state.youtube_url.split("&hash=")[0]
            + f"&hash={state.chunk_duration_ms}-{state.pad_duration_ms}"
        )
        state.asr_stream = stream_text(
            state.youtube_url, state.chunk_duration_ms, state.pad_duration_ms
        )
        state.chunks_taken = 0  # number of audio chunks consumed so far
        state.lines = deque([], maxlen=5)  # limit to the last 5 lines of subs

    player = st_player(state.youtube_url, **player_options, key="youtube_player")

    # While the video is still playing (played fraction < 1.0), keep the
    # transcript in step with the playback position.
    if "asr_stream" in state and player.data and player.data["played"] < 1.0:
        # check how many seconds were played, and if more than processed - write the next text chunk
        processed_seconds = state.chunks_taken * (state.chunk_duration_ms / 1000)
        if processed_seconds < player.data["playedSeconds"]:
            text = next(state.asr_stream)
            state.lines.append(text)
            state.chunks_taken += 1
        if "lines" in state:
            # render the buffered subtitle lines (the deque keeps at most the last 5)
            st.code("\n".join(state.lines))


if __name__ == "__main__":
    main()