PierreBrunelle committed on
Commit
d294ddd
1 Parent(s): 27c954a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -8,12 +8,12 @@ from pixeltable.functions.video import extract_audio
8
  from pixeltable.functions.audio import get_metadata
9
  from pixeltable.functions import openai
10
 
11
- """## Store OpenAI API Key"""
12
 
13
  if 'OPENAI_API_KEY' not in os.environ:
14
  os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')
15
 
16
- """## Create a Table, a View, and Computed Columns"""
17
 
18
  pxt.drop_dir('directory', force=True)
19
  pxt.create_dir('directory')
@@ -37,7 +37,7 @@ t['metadata'] = get_metadata(t.audio)
37
  t['transcription'] = openai.transcriptions(audio=t.audio, model='whisper-1')
38
  t['transcription_text'] = t.transcription.text
39
 
40
- """## Custom UDF for Generating Social Media Prompts"""
41
 
42
  #Custom User-Defined Function (UDF) for Generating Social Media Prompts
43
  @pxt.udf
@@ -100,7 +100,7 @@ def process_and_generate_post(video_file, social_media_type, progress=gr.Progres
100
  progress(0.8, desc="Preparing results...")
101
 
102
  # Retrieve Pixeltable Table containing all videos and stored data
103
- df_output = t.select(t.transcription_text).collect().to_pandas()
104
 
105
  #Display content
106
  return social_media_post, thumbnails, df_output, audio
@@ -173,8 +173,8 @@ def gradio_interface():
173
  )
174
  audio = gr.Audio(label="Extracted audio", show_download_button=True)
175
 
176
- df_output = gr.DataFrame(label="Transcription")
177
-
178
  generate_btn.click(
179
  fn=process_and_generate_post,
180
  trigger_mode='once',
 
8
  from pixeltable.functions.audio import get_metadata
9
  from pixeltable.functions import openai
10
 
11
+ # Store OpenAI API Key
12
 
13
  if 'OPENAI_API_KEY' not in os.environ:
14
  os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')
15
 
16
+ # Create a Table, a View, and Computed Columns
17
 
18
  pxt.drop_dir('directory', force=True)
19
  pxt.create_dir('directory')
 
37
  t['transcription'] = openai.transcriptions(audio=t.audio, model='whisper-1')
38
  t['transcription_text'] = t.transcription.text
39
 
40
+ # Custom UDF for Generating Social Media Prompts
41
 
42
  #Custom User-Defined Function (UDF) for Generating Social Media Prompts
43
  @pxt.udf
 
100
  progress(0.8, desc="Preparing results...")
101
 
102
  # Retrieve Pixeltable Table containing all videos and stored data
103
+ df_output = t.select(t.transcription_text).tail(1)['transcription_text'][0]
104
 
105
  #Display content
106
  return social_media_post, thumbnails, df_output, audio
 
173
  )
174
  audio = gr.Audio(label="Extracted audio", show_download_button=True)
175
 
176
+ df_output = gr.Textbox(label="Transcription", show_copy_button=True)
177
+
178
  generate_btn.click(
179
  fn=process_and_generate_post,
180
  trigger_mode='once',