import os
import getpass

import gradio as gr
import pixeltable as pxt
from pixeltable.iterators import FrameIterator
from pixeltable.functions.video import extract_audio
from pixeltable.functions.audio import get_metadata
from pixeltable.functions import openai

if 'OPENAI_API_KEY' not in os.environ:
    os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')

# Create a Pixeltable directory to organize related tables
pxt.drop_dir('directory', force=True)
pxt.create_dir('directory')

# Create a table to store video data
t = pxt.create_table(
    'directory.video_table',
    {
        "video": pxt.VideoType(nullable=True),
        "sm_type": pxt.StringType(nullable=True),
    }
)

# Create a view that automatically extracts frames from videos
frames_view = pxt.create_view(
    "directory.frames",
    t,
    iterator=FrameIterator.create(video=t.video, num_frames=2)
)

# Create computed columns to store transformations and persist outputs
t['audio'] = extract_audio(t.video, format='mp3')
t['metadata'] = get_metadata(t.audio)
t['transcription'] = openai.transcriptions(audio=t.audio, model='whisper-1')
t['transcription_text'] = t.transcription.text

# Create a user-defined function (UDF) to construct the prompt
# This shows how Pixeltable allows users to extend functionality with custom Python code
@pxt.udf
def prompt(A: str, B: str) -> list[dict]:
    return [
        {'role': 'system',
         'content': 'You are an expert in creating social media content. Generate an effective post '
                    'based on the video transcript and the type of social media requested. '
                    'Please respect the character and size limits of each social media platform.'},
        {'role': 'user', 'content': f'A: "{A}" \n B: "{B}"'}
    ]

t['message'] = prompt(t.sm_type, t.transcription_text)

# Use Pixeltable's built-in OpenAI integration to generate the post
t['response'] = openai.chat_completions(
    messages=t.message,
    model='gpt-4o-mini-2024-07-18',
    max_tokens=500
)
t['answer'] = t.response.choices[0].message.content

MAX_VIDEO_SIZE_MB = 35

def process_and_generate_post(video_file, social_media_type):
    if not video_file:
        return "Please upload a video file.", None, None, None
    try:
        # Check video file size (convert bytes to MB)
        video_size = os.path.getsize(video_file) / (1024 * 1024)
        if video_size > MAX_VIDEO_SIZE_MB:
            return (
                f"The video file is larger than {MAX_VIDEO_SIZE_MB} MB. Please upload a smaller file.",
                None, None, None
            )

        # Insert a video into the table. Pixeltable supports referencing external data sources like URLs
        t.insert([{
            "video": video_file,
            "sm_type": social_media_type
        }])

        # Retrieve the generated social media post
        social_media_post = t.select(t.answer).tail(1)['answer'][0]

        # Retrieve the extracted audio
        audio = t.select(t.audio).tail(1)['audio'][0]

        # Retrieve thumbnails
        thumbnails = frames_view.select(frames_view.frame).tail(4)['frame']

        # Retrieve the Pixeltable table containing all videos and stored data
        df_output = t.collect().to_pandas()

        # Display content
        return social_media_post, thumbnails, df_output, audio
    except Exception as e:
        return f"An error occurred: {str(e)}", None, None, None

# Gradio Interface
def gradio_interface():
    with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
        gr.Markdown(
            """