AhmadMustafa committed on
Commit 65a422d · 1 Parent(s): 5c4bf91

add: js file for iframe

20240226t210135-transcript-diarized.txt DELETED
The diff for this file is too large to render.
 
app.py CHANGED
@@ -1,7 +1,7 @@
  import json
  import os
  from dataclasses import dataclass
- from typing import Dict, List, Optional, Tuple
+ from typing import Dict, List

  import gradio as gr
  import requests
@@ -222,7 +222,7 @@ class TranscriptProcessor:
          response_text = completion.choices[0].message.content.strip()
          try:
              corrected_mapping = json.loads(response_text)
-         except:
+         except Exception:
              response_text = response_text[
                  response_text.find("{") : response_text.rfind("}") + 1
              ]
@@ -340,7 +340,7 @@ Your job is to find out the timestamp of the best answer given by the interviewe
  The way to know if there are multiple takes to a question is to see in the transcript if the same text is repeated, If not then number of takes is 1.
  Question 1 should always be the introduction if the speaker has introduced themselves to find the best introduction time (Last timestamp is the best timestamp), Rest of questions should be in the order they were asked.
  Return format is:
- Call ID is: {{cid}}, Session ID is: {{rsid}}, Origin is: {{origin}}, Call Type is: {{ct}}
+ Call ID is: {{cid}}, Recording Session ID is: {{rsid}}, Origin is: {{origin}}, Call Type is: {{ct}}
  1. Question: question
  Number of takes: number
  Best Answer timestamp: [Timestamp: start_time - end_time]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{start_time_in_sec}}&et={{end_time_in_sec}}"').
@@ -388,6 +388,27 @@ def chat(
      origin,
      ct,
  ) -> str:
+     tools = [
+         {
+             "type": "function",
+             "function": {
+                 "name": "correct_speaker_name_with_url",
+                 "description": "If a User provides a link to Agenda file, call the correct_speaker_name_with_url function to correct the speaker names based on the url, i.e if a user says 'Here is the Luma link for the event' and provides a link to the event, the function will correct the speaker names based on the event.",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "url": {
+                             "type": "string",
+                             "description": "The url to the agenda.",
+                         },
+                     },
+                     "required": ["url"],
+                     "additionalProperties": False,
+                 },
+             },
+         }
+     ]
+ 
      try:
          client = OpenAI()

@@ -398,6 +419,8 @@ def chat(
          prompt = f"""You are a helpful assistant analyzing transcripts and generating timestamps and URL. Call ID is {cid}, Session ID is {rsid}, origin is {origin}, Call Type is {ct}.
  Transcript:\n{transcript_processor.get_transcript()}
  If a user asks timestamps for a specific topic, find the start time and end time of that specific topic and return answer in the format:
+ If the user provides a link to the agenda, use the correct_speaker_name_with_url function to correct the speaker names based on the agenda.
+ 
  Answer format:
  Topic: Heading [Timestamp: start_time - end_time]({link_start}://{{origin}}/collab/{{cid}}/{{rsid}}?st={{start_time_in_sec}}&et={{end_time_in_sec}}"').

@@ -405,7 +428,6 @@ For Example:
  If the start time is 10:13 and end time is 10:18, the url will be:
  {link_start}://roll.ai/colab/1234aq_12314/51234151?st=613&et=618
  In the URL, make sure that after RSID there is ? and then rest of the fields are added via &.
- 
  """
          messages = [{"role": "system", "content": prompt}]

@@ -421,9 +443,18 @@ In the URL, make sure that after RSID there is ? and then rest of the fields are
          completion = client.chat.completions.create(
              model="gpt-4o-mini",
              messages=messages,
+             tools=tools,
          )

          response = completion.choices[0].message
+         if response.function_call:
+             args = json.loads(response.function_call.arguments)
+             url = args.get("url", None)
+             if url:
+                 transcript_processor.correct_speaker_mapping_with_agenda(url)
+                 return "Speaker names corrected based on the agenda."
+             else:
+                 return "No URL provided for correcting speaker names."

          return response.content

@@ -462,7 +493,8 @@ def create_chat_interface():
          flex-grow: 1 !important;
      }
      """
-     with gr.Blocks(fill_height=True, fill_width=True, css=css) as demo:
+ 
+     with gr.Blocks(fill_height=True, fill_width=True, css=css, js="index.js") as demo:
          chatbot = gr.Chatbot(
              elem_id="chatbot_box",
              layout="bubble",
@@ -478,6 +510,8 @@ def create_chat_interface():
          origin_state = gr.State()
          ct_state = gr.State()
          turl_state = gr.State()
+         iframe_html = "<iframe id='link-frame'></iframe>"
+         gr.HTML(value=iframe_html)  # Add iframe to the UI

          def on_app_load(request: gr.Request):
              cid = request.query_params.get("cid", None)
@@ -532,6 +566,7 @@ def create_chat_interface():
              chatbot_value = [
                  (None, initial_analysis)
              ]  # initialized with initial analysis and assistant is None
+ 
              return [
                  chatbot_value,
                  transcript_processor,
@@ -614,7 +649,7 @@ def main():
      try:
          setup_openai_key()
          demo = create_chat_interface()
-         demo.launch(share=True)
+         demo.launch(debug=True)
      except Exception as e:
          print(f"Error starting application: {str(e)}")
          raise
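
Note on the function-calling hunk above: when the `tools=` parameter is used, the OpenAI Python SDK (v1.x) reports tool invocations on `message.tool_calls`; `message.function_call` is only populated by the legacy `functions=` API. A minimal sketch of the same handling written against `tool_calls`, reusing the commit's `correct_speaker_mapping_with_agenda` helper inside `chat()`, could look like this (not part of the commit):

    response = completion.choices[0].message
    if response.tool_calls:
        # The model requested a tool; arguments arrive as a JSON string.
        call = response.tool_calls[0]
        if call.function.name == "correct_speaker_name_with_url":
            args = json.loads(call.function.arguments)
            url = args.get("url")
            if url:
                transcript_processor.correct_speaker_mapping_with_agenda(url)
                return "Speaker names corrected based on the agenda."
            return "No URL provided for correcting speaker names."
    return response.content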
index.js ADDED
@@ -0,0 +1,30 @@
+ function createIframeHandler() {
+     let iframe = document.getElementById('link-frame');
+     if (!iframe) {
+         iframe = document.createElement('iframe');
+         iframe.id = 'link-frame';
+         iframe.style.position = 'absolute';
+         iframe.style.width = '1px';
+         iframe.style.height = '1px';
+         iframe.style.right = '-100px';
+         iframe.style.bottom = '-100px';
+         iframe.style.display = 'none'; // Hidden initially
+         document.body.appendChild(iframe);
+     }
+ 
+     document.addEventListener('click', function (event) {
+         var link = event.target.closest('a');
+         if (link && link.href) {
+             try {
+                 iframe.src = link.href;
+                 iframe.style.display = 'block'; // Show iframe on link click
+                 event.preventDefault();
+                 console.log('Opening link in iframe:', link.href);
+             } catch (error) {
+                 console.error('Failed to open link in iframe:', error);
+             }
+         }
+     });
+ 
+     return 'Iframe handler initialized';
+ }
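
How index.js is expected to be picked up (a sketch, not part of the commit, assuming Gradio's documented behavior for the `js=` parameter: the file should contain a single function, which is executed once when the page loads, so `createIframeHandler()` attaches the document-level click listener as soon as the UI renders). The sample link below reuses the example URL from the prompt in app.py:

    import gradio as gr

    # Minimal wiring sketch: hidden iframe target plus a sample timestamp link.
    with gr.Blocks(js="index.js") as demo:
        gr.HTML("<iframe id='link-frame'></iframe>")
        gr.Markdown("[Open at 10:13](https://roll.ai/colab/1234aq_12314/51234151?st=613&et=618)")

    demo.launch()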
step_take19AWS.json DELETED
The diff for this file is too large to render.