Spaces:
Sleeping
Sleeping
Commit
·
09425e4
1
Parent(s):
696b702
add: initial HF space
Browse files- chat.py +424 -0
- step_take19AWS.json +0 -0
chat.py
ADDED
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from dataclasses import dataclass
|
4 |
+
from typing import Dict, List, Optional, Tuple
|
5 |
+
|
6 |
+
import gradio as gr
|
7 |
+
import requests
|
8 |
+
from bs4 import BeautifulSoup
|
9 |
+
from openai import OpenAI
|
10 |
+
|
11 |
+
# Mode flag: True = street-interview analysis (best-take detection per
# question); False = panel/talk analysis (social-clip topic extraction).
street_interview = True
|
12 |
+
|
13 |
+
|
14 |
+
@dataclass
class TranscriptSegment:
    """A single contiguous utterance from one speaker in the transcript.

    Fields are populated from the transcript JSON's speaker-label
    segments; ``speaker_name`` is filled in later once speaker IDs have
    been mapped to human names.
    """

    # Numeric speaker label with the "spk_"/"spk" prefix stripped (e.g. "0").
    speaker_id: str
    # Segment boundaries in seconds from the start of the recording.
    start_time: float
    end_time: float
    # Space-joined words spoken in this segment.
    text: str
    # Human-readable name; empty until map_speaker_ids_to_names() runs.
    speaker_name: str = ""
|
21 |
+
|
22 |
+
|
23 |
+
class TranscriptProcessor:
|
24 |
+
    def __init__(self, transcript_file: str):
        """Load *transcript_file* (transcript JSON) and pre-process it.

        Processing happens eagerly in order: the raw JSON is loaded,
        split into speaker segments, then speaker IDs are mapped to
        names via the OpenAI API (requires OPENAI_API_KEY to be set).
        """
        self.transcript_file = transcript_file
        # Raw parsed JSON from the transcript file.
        self.transcript_data = None
        # Human-readable transcript with timestamps and speaker labels/names.
        self.formatted_transcript = None
        # Chronological list of TranscriptSegment objects.
        self.segments = []
        # Sliding windows of combined segment text (for text matching).
        self.text_windows = []
        # Number of consecutive segments combined per text window.
        self.window_size = 2
        # Maps "spk_<id>" -> speaker name (filled by the OpenAI call).
        self.speaker_mapping = {}
        self._load_transcript()
        self._process_transcript()
        self.map_speaker_ids_to_names()  # Map speaker IDs to names
|
35 |
+
|
36 |
+
def _load_transcript(self) -> None:
|
37 |
+
"""Load the transcript JSON file."""
|
38 |
+
with open(self.transcript_file, "r") as f:
|
39 |
+
self.transcript_data = json.load(f)
|
40 |
+
|
41 |
+
def _format_time(self, seconds: float) -> str:
|
42 |
+
"""Convert seconds to formatted time string (MM:SS)."""
|
43 |
+
minutes = int(seconds // 60)
|
44 |
+
seconds = int(seconds % 60)
|
45 |
+
return f"{minutes:02d}:{seconds:02d}"
|
46 |
+
|
47 |
+
    def _process_transcript(self) -> None:
        """Process the transcript into segments with speaker information and create a formatted version with timestamps."""
        results = self.transcript_data["results"]

        # Process into segments
        for segment in results["speaker_labels"]["segments"]:
            # Tolerate both "speaker_label" and "speakerlabel" key spellings.
            speaker_id = segment.get("speaker_label", segment.get("speakerlabel", ""))
            # Keep only the numeric part of the label (e.g. "spk_0" -> "0").
            speaker_id = (
                speaker_id.replace("spk_", "").replace("spk", "") if speaker_id else ""
            )

            start_time = float(segment.get("start_time", 0))
            end_time = float(segment.get("end_time", 0))

            # Collect the spoken-word items that fall inside this segment's
            # time range; punctuation items carry no start_time and are skipped.
            items = [
                item
                for item in results["items"]
                if "start_time" in item
                and float(item["start_time"]) >= start_time
                and float(item["start_time"]) < end_time
                and item["type"] == "pronunciation"
            ]

            # Take the top-ranked alternative for each word.
            words = [item["alternatives"][0]["content"] for item in items]
            if words:
                self.segments.append(
                    TranscriptSegment(
                        speaker_id=speaker_id,
                        start_time=start_time,
                        end_time=end_time,
                        text=" ".join(words),
                    )
                )

        # Build a human-readable transcript: a "time_stamp:" header line plus
        # a "spk <id>:" text line per segment, blank-line separated.
        formatted_segments = []
        for seg in self.segments:
            start_time_str = self._format_time(seg.start_time)
            end_time_str = self._format_time(seg.end_time)
            formatted_segments.append(
                f"time_stamp: {start_time_str}-{end_time_str}\n"
                f"spk {seg.speaker_id}: {seg.text}\n"
            )

        self.formatted_transcript = "\n".join(formatted_segments)

        # Create sliding windows of text for better matching
        for i in range(len(self.segments)):
            # Combine current segment with next segments within window
            window_segments = self.segments[i : i + self.window_size]
            combined_text = " ".join(seg.text for seg in window_segments)
            if window_segments:
                self.text_windows.append(
                    {
                        "text": combined_text,
                        "start_time": window_segments[0].start_time,
                        "end_time": window_segments[-1].end_time,
                    }
                )
|
105 |
+
|
106 |
+
    def map_speaker_ids_to_names(self) -> None:
        """Map speaker IDs to names based on introductions in the transcript.

        Asks the OpenAI chat API to infer a ``{"spk_<id>": name}`` mapping
        from self-introductions, then rewrites ``self.formatted_transcript``
        using the resolved names. Best-effort: any failure is logged and
        leaves the spk-ID transcript intact.
        """
        try:

            transcript = self.formatted_transcript

            prompt = (
                "Given the following transcript where speakers are identified as spk 0, spk 1, spk 2, etc., please map each spk ID to the speaker's name based on their introduction in the transcript. If no name is introduced for a speaker, keep it as spk_id. Return the mapping as a JSON object in the format {'spk_0': 'Speaker Name', 'spk_1': 'Speaker Name', ...}\n\n"
                f"Transcript:\n{transcript}"
            )

            # Requires OPENAI_API_KEY in the environment (see setup_openai_key).
            client = OpenAI()

            completion = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt},
                ],
                temperature=0,
            )

            response_text = completion.choices[0].message.content.strip()
            try:
                self.speaker_mapping = json.loads(response_text)
            except json.JSONDecodeError:
                # extract left most and right most {}
                # (the model sometimes wraps the JSON in prose or code fences)
                response_text = response_text[
                    response_text.find("{") : response_text.rfind("}") + 1
                ]
                try:
                    self.speaker_mapping = json.loads(response_text)
                except json.JSONDecodeError:
                    print("Error parsing speaker mapping JSON.")
                    self.speaker_mapping = {}
            # Attach the resolved name (or the "spk_<id>" fallback) to each segment.
            for segment in self.segments:
                spk_id = f"spk_{segment.speaker_id}"
                speaker_name = self.speaker_mapping.get(spk_id, spk_id)
                segment.speaker_name = speaker_name  # Store the speaker name

            # Recreate the formatted transcript with speaker names
            formatted_segments = []
            for seg in self.segments:
                start_time_str = self._format_time(seg.start_time)
                end_time_str = self._format_time(seg.end_time)
                formatted_segments.append(
                    f"time_stamp: {start_time_str}-{end_time_str}\n"
                    f"{seg.speaker_name}: {seg.text}\n"
                )
            self.formatted_transcript = "\n".join(formatted_segments)

        except Exception as e:
            print(f"Error mapping speaker IDs to names: {str(e)}")
            self.speaker_mapping = {}
|
160 |
+
|
161 |
+
def correct_speaker_mapping_with_agenda(self, url: str) -> None:
|
162 |
+
"""Fetch agenda from a URL and correct the speaker mapping using OpenAI."""
|
163 |
+
try:
|
164 |
+
# Fetch the HTML content from the URL
|
165 |
+
response = requests.get(url)
|
166 |
+
response.raise_for_status()
|
167 |
+
html_content = response.text
|
168 |
+
|
169 |
+
# Parse the HTML to find the desired description
|
170 |
+
soup = BeautifulSoup(html_content, "html.parser")
|
171 |
+
description_tag = soup.find(
|
172 |
+
"script", {"type": "application/ld+json"}
|
173 |
+
) # Find the ld+json metadata block
|
174 |
+
agenda = ""
|
175 |
+
|
176 |
+
if description_tag:
|
177 |
+
# Extract the JSON content
|
178 |
+
json_data = json.loads(description_tag.string)
|
179 |
+
if "description" in json_data:
|
180 |
+
agenda = json_data["description"]
|
181 |
+
else:
|
182 |
+
print("Agenda description not found in the JSON metadata.")
|
183 |
+
else:
|
184 |
+
print("No structured data (ld+json) found.")
|
185 |
+
|
186 |
+
if not agenda:
|
187 |
+
print("No agenda found in the structured metadata. Trying meta tags.")
|
188 |
+
|
189 |
+
# Fallback: Use meta description if ld+json doesn't have it
|
190 |
+
meta_description = soup.find("meta", {"name": "description"})
|
191 |
+
agenda = meta_description["content"] if meta_description else ""
|
192 |
+
|
193 |
+
if not agenda:
|
194 |
+
print("No agenda found in any description tags.")
|
195 |
+
return
|
196 |
+
|
197 |
+
prompt = (
|
198 |
+
f"Given the speaker mapping {self.speaker_mapping}, agenda:\n{agenda}, and the transcript: {self.formatted_transcript}\n\n"
|
199 |
+
"Some speaker names in the mapping might have spelling errors or be incomplete."
|
200 |
+
"Please correct the names based on the agenda. Return the corrected mapping in JSON format as "
|
201 |
+
"{'spk_0': 'Correct Name', 'spk_1': 'Correct Name', ...}."
|
202 |
+
"You should only update the name if the name sounds very similar, or there is a good spelling overlap/ The Speaker Introduction matches the description of the Talk from Agends. If the name is totally unrelated, keep the original name."
|
203 |
+
)
|
204 |
+
|
205 |
+
# Use OpenAI API to get corrected mapping
|
206 |
+
client = OpenAI()
|
207 |
+
|
208 |
+
completion = client.chat.completions.create(
|
209 |
+
model="gpt-4o-mini",
|
210 |
+
messages=[
|
211 |
+
{"role": "system", "content": "You are a helpful assistant."},
|
212 |
+
{"role": "user", "content": prompt},
|
213 |
+
],
|
214 |
+
temperature=0,
|
215 |
+
)
|
216 |
+
|
217 |
+
response_text = completion.choices[0].message.content.strip()
|
218 |
+
try:
|
219 |
+
corrected_mapping = json.loads(response_text)
|
220 |
+
except:
|
221 |
+
response_text = response_text[
|
222 |
+
response_text.find("{") : response_text.rfind("}") + 1
|
223 |
+
]
|
224 |
+
try:
|
225 |
+
corrected_mapping = json.loads(response_text)
|
226 |
+
except json.JSONDecodeError:
|
227 |
+
print(
|
228 |
+
"Error parsing corrected speaker mapping JSON, keeping the original mapping."
|
229 |
+
)
|
230 |
+
corrected_mapping = self.speaker_mapping
|
231 |
+
# Update the speaker mapping with corrected names
|
232 |
+
self.speaker_mapping = corrected_mapping
|
233 |
+
print("Corrected Speaker Mapping:", self.speaker_mapping)
|
234 |
+
|
235 |
+
# Update the transcript segments with corrected names
|
236 |
+
for segment in self.segments:
|
237 |
+
spk_id = f"spk_{segment.speaker_id}"
|
238 |
+
segment.speaker_name = self.speaker_mapping.get(spk_id, spk_id)
|
239 |
+
|
240 |
+
# Recreate the formatted transcript with corrected names
|
241 |
+
formatted_segments = []
|
242 |
+
for seg in self.segments:
|
243 |
+
start_time_str = self._format_time(seg.start_time)
|
244 |
+
end_time_str = self._format_time(seg.end_time)
|
245 |
+
formatted_segments.append(
|
246 |
+
f"time_stamp: {start_time_str}-{end_time_str}\n"
|
247 |
+
f"{seg.speaker_name}: {seg.text}\n"
|
248 |
+
)
|
249 |
+
self.formatted_transcript = "\n".join(formatted_segments)
|
250 |
+
|
251 |
+
except requests.exceptions.RequestException as e:
|
252 |
+
print(f"Error fetching agenda from URL: {str(e)}")
|
253 |
+
except Exception as e:
|
254 |
+
print(f"Error correcting speaker mapping: {str(e)}")
|
255 |
+
|
256 |
+
def get_transcript(self) -> str:
|
257 |
+
"""Return the formatted transcript with speaker names."""
|
258 |
+
return self.formatted_transcript
|
259 |
+
|
260 |
+
def get_transcript_data(self) -> Dict:
|
261 |
+
"""Return the raw transcript data."""
|
262 |
+
return self.transcript_data
|
263 |
+
|
264 |
+
|
265 |
+
def setup_openai_key() -> None:
    """Export the OpenAI API key from a local ``api.key`` file.

    Reads ``api.key`` from the current working directory and stores its
    stripped contents in the OPENAI_API_KEY environment variable, which
    the ``OpenAI()`` client picks up implicitly.

    Raises:
        FileNotFoundError: if ``api.key`` does not exist.
    """
    try:
        # Explicit encoding: the key is ASCII, but don't depend on the
        # platform default codec.
        with open("api.key", "r", encoding="utf-8") as f:
            os.environ["OPENAI_API_KEY"] = f.read().strip()
    except FileNotFoundError:
        # Re-raise with an actionable message; `from None` keeps the
        # traceback free of the redundant original FileNotFoundError.
        raise FileNotFoundError(
            "api.key file not found. Please create it with your OpenAI API key."
        ) from None
|
274 |
+
|
275 |
+
|
276 |
+
def get_initial_analysis(transcript_processor: TranscriptProcessor) -> str:
    """Perform initial analysis of the transcript using OpenAI.

    The module-level ``street_interview`` flag selects the prompt: either
    best-take detection per interview question, or per-speaker topic
    extraction for social clips. Returns the model's reply text, or an
    error string if the API call fails.
    """
    try:
        transcript = transcript_processor.get_transcript()
        # print("Transcript is: ", transcript)
        client = OpenAI()
        if street_interview:
            prompt = f"""This is a transcript for a street interview. Transcript: {transcript}
In this street interview, the host asks multiple questions to the interviewees.
The interviewee can repeat a single answer multiple time to get the best take.
Your job is to find out the timestamp of the best answer given by the interviewee (Do not include the Question timestamp by interviwer in this). If there are multiple attempts for a question, best part is the last part of the question. If no question was asked but something is repeated, please include that in the answer as well
The way to know if there are multiple takes to a question is to see in the transcript if the same text is repeated, If not then number of takes is 1.
Question 1 should always be the introduction if the speaker has introduced themselves to find the best introduction time (Last timestamp is the best timestamp), Rest of questions should be in the order they were asked.
Return format is:
1. Question: question
Number of takes: number
Best Answer timestamp: start_time - end_time
You can visit the call segment on this URL: https://roll.ai/call_id/colab_id?starttime=start_time?endtime=end_time."
"""
        else:
            prompt = f"""Given the transcript {transcript}, For All the speakers, short list all people, news, events, trends, and source that are discussed by speakers along with the start time of that topic and end time of that topic from the transcript. Rank all topics based on what would make for the best social clips. I need atleast 3 topics per speaker.
You should mention the Speaker Name first, then 3 posts with their timestamps, and so on.
Return format is: Speaker Name\n1.Topic: topic, Start Time: start_time, End Time: end_time\n2...."""

        # Debug aid: show the full prompt that will be sent.
        print(prompt)
        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
        )
        return completion.choices[0].message.content
    except Exception as e:
        print(f"Error in initial analysis: {str(e)}")
        return "An error occurred during initial analysis. Please check your API key and file path."
|
312 |
+
|
313 |
+
|
314 |
+
# Hard-coded identifiers for the demo call this Space was built around;
# they are interpolated into the https://roll.ai/ links in the prompts.
call_id = "20240226t210135"
colab_id = "1231412431212"
|
316 |
+
|
317 |
+
|
318 |
+
def generate_call_link(start_time: str) -> str:
    """Build a roll.ai deep link to the call at *start_time*.

    The "MM:SS" timestamp is rewritten as "MM.SS" for the URL query.
    """
    timestamp_for_url = start_time.replace(":", ".")
    return f"https://roll.ai/{call_id}/{colab_id}?t={timestamp_for_url}"
|
322 |
+
|
323 |
+
|
324 |
+
def chat(
    message: str, chat_history: List, transcript_processor: TranscriptProcessor
) -> str:
    """Answer *message* about the transcript via the OpenAI chat API.

    Args:
        message: The user's latest question.
        chat_history: Gradio-style list of (user, assistant) pairs.
            Entries whose user side is None (the seeded initial
            analysis) contribute only their assistant side.
        transcript_processor: Supplies the formatted transcript that is
            embedded in the system prompt.

    Returns:
        The assistant's reply text, or an apology string on any error.
    """
    try:
        client = OpenAI()
        prompt = f"""You are a helpful assistant analyzing transcripts and generating timestamps and URL. Call ID is {call_id} and Colab ID is {colab_id}.
Transcript: {transcript_processor.get_transcript()}
If a user asks timestamps for a specific topic, find the start time and end time of that specific topic and return answer in the format: 'Timestamp: start_time - end_time'.
You can visit the call segment on this URL: https://roll.ai/call_id/colab_id?starttime=start_time?endtime=end_time."
If a user requests a link to a specific segment topic, generate a link to that segment using the following format: https://roll.ai/call_id/colab_id?starttime=start_time?endtime=end_time."""

        messages = [{"role": "system", "content": prompt}]

        # Replay prior turns so the model keeps conversational context.
        for user_msg, assistant_msg in chat_history:
            if user_msg is not None:  # Skip the initial message where user_msg is None
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg is not None:
                messages.append({"role": "assistant", "content": assistant_msg})

        # Add the current message
        messages.append({"role": "user", "content": message})

        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
        )

        return completion.choices[0].message.content

    except Exception as e:
        print(f"Unexpected error in chat: {str(e)}")
        import traceback

        print(f"Traceback: {traceback.format_exc()}")
        return "Sorry, there was an error processing your request."
|
367 |
+
|
368 |
+
|
369 |
+
def create_chat_interface(transcript_processor: TranscriptProcessor):
    """Create and configure the chat interface.

    Builds a Gradio Blocks app seeded with the initial transcript
    analysis (which triggers one OpenAI call at build time) and wired so
    each submitted message is answered via ``chat()``.
    """

    def respond(message: str, chat_history: List) -> Tuple[str, List]:
        # Submit handler: returns ("" to clear the textbox, new history).
        if not message:
            return "", chat_history

        bot_message = chat(message, chat_history, transcript_processor)
        # Copy rather than mutate the history Gradio handed us.
        new_history = list(chat_history)
        new_history.append((message, bot_message))
        return "", new_history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        clear = gr.ClearButton([msg, chatbot])

        # Initialize with transcript analysis
        initial_analysis = get_initial_analysis(transcript_processor)

        def init_chat():
            # Seed the chat with the analysis as an assistant-only turn.
            return [(None, initial_analysis)]

        chatbot.value = init_chat()
        msg.submit(respond, [msg, chatbot], [msg, chatbot])

    return demo
|
396 |
+
|
397 |
+
|
398 |
+
def main():
    """Main function to run the application.

    Loads the API key, builds the transcript processor (which performs
    OpenAI-based speaker mapping), cross-checks names against the event
    agenda page, then launches the Gradio chat UI with a public share link.
    """
    try:
        setup_openai_key()

        # Resolve the transcript path relative to this file, not the CWD.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        transcript_file = os.path.join(current_dir, "step_take19AWS.json")

        if not os.path.exists(transcript_file):
            raise FileNotFoundError(
                "Transcript file not found. Please check the file path."
            )

        transcript_processor = TranscriptProcessor(transcript_file)
        # Correct speaker names using the event's agenda page.
        transcript_processor.correct_speaker_mapping_with_agenda(
            "https://lu.ma/STEPSF24"
        )
        demo = create_chat_interface(transcript_processor)
        demo.launch(share=True)

    except Exception as e:
        # Log and re-raise so the process exits non-zero on startup failure.
        print(f"Error starting application: {str(e)}")
        raise
|
421 |
+
|
422 |
+
|
423 |
+
# Script entry point (run directly or by the Hugging Face Space runtime).
if __name__ == "__main__":
    main()
|
step_take19AWS.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|