| import gradio as gr |
| import pandas as pd |
| import os |
| import json |
| import tempfile |
| from openai import OpenAI |
|
|
| |
# Module-level OpenAI client, shared by transcription and extraction calls.
# Reads the key from the environment; if OPENAI_API_KEY is unset the client is
# created with api_key=None and API calls will fail at request time.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
|
| |
def transcribe_audio(file_path):
    """Transcribe the audio file at *file_path* via OpenAI Whisper.

    Parameters
    ----------
    file_path : str
        Path to an audio file on disk.

    Returns
    -------
    str
        The transcript text returned by the whisper-1 model.
    """
    with open(file_path, "rb") as audio:
        result = client.audio.transcriptions.create(model="whisper-1", file=audio)
    return result.text
|
|
|
|
| |
def extract_fields(text):
    """Ask the chat model to pull CRM fields out of a conversation transcript.

    Parameters
    ----------
    text : str
        The transcribed conversation.

    Returns
    -------
    dict
        Always contains exactly the keys Name, Phone, Product, Budget,
        Location and Intent, so the downstream DataFrame schema is stable.
        Values default to "" when the model reply cannot be parsed.
    """
    empty = {
        "Name": "",
        "Phone": "",
        "Product": "",
        "Budget": "",
        "Location": "",
        "Intent": "",
    }

    prompt = f"""
Extract the following fields from the conversation:

Name, Phone, Product, Budget, Location, Intent.

Return ONLY valid JSON like:
{{
"Name": "",
"Phone": "",
"Product": "",
"Budget": "",
"Location": "",
"Intent": ""
}}

Conversation:
{text}
"""

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # deterministic extraction
    )

    content = (response.choices[0].message.content or "").strip()

    # Models frequently wrap their reply in markdown code fences despite the
    # "ONLY valid JSON" instruction; unwrap before parsing.
    if content.startswith("```"):
        content = content.strip("`").strip()
        if content.lower().startswith("json"):
            content = content[4:].strip()

    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt and
    # SystemExit; catch only the JSON parse failure we actually expect.
    try:
        data = json.loads(content)
    except json.JSONDecodeError:
        return empty

    # The model could legally return a JSON array/string; only a dict is usable.
    if not isinstance(data, dict):
        return empty

    # Guarantee every expected key is present even if the model omitted some,
    # and drop any extra keys it invented.
    return {key: data.get(key, "") for key in empty}
|
|
|
|
| |
def process_audio(audio_file):
    """Full pipeline for one Gradio request: transcribe, extract, export.

    Parameters
    ----------
    audio_file : str | None
        Filepath handed over by the gr.Audio component (type="filepath").

    Returns
    -------
    tuple
        (transcript text, DataFrame of extracted fields, path to the Excel
        file). On missing input or failure, the first slot carries a message,
        the DataFrame is empty and the path is None.
    """
    if audio_file is None:
        return "No audio provided", pd.DataFrame(), None

    try:
        transcript_text = transcribe_audio(audio_file)
        crm_record = extract_fields(transcript_text)
        crm_df = pd.DataFrame([crm_record])

        # Write the one-row sheet somewhere the gr.File component can serve.
        output_path = os.path.join(tempfile.gettempdir(), "crm_output.xlsx")
        crm_df.to_excel(output_path, index=False)
    except Exception as e:
        # UI boundary: surface the failure in the transcript box instead of
        # crashing the callback.
        return f"Error: {str(e)}", pd.DataFrame(), None

    return transcript_text, crm_df, output_path
|
|
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI: two input tabs (microphone recording / file upload) share one
# output area — transcription textbox, extracted-fields table, Excel download.
# Both buttons route through the same process_audio pipeline.
# ---------------------------------------------------------------------------
with gr.Blocks() as app:
    gr.Markdown("# ποΈ AI Voice to CRM Auto Filler")

    with gr.Tabs():

        with gr.Tab("π€ Record Inquiry"):
            mic_input = gr.Audio(
                sources=["microphone"],
                type="filepath",  # callback receives a path on disk, not raw samples
                label="Record Audio"
            )
            btn1 = gr.Button("Process Recording")

        with gr.Tab("π Upload Voice"):
            file_input = gr.Audio(
                sources=["upload"],
                type="filepath",
                label="Upload Audio File"
            )
            btn2 = gr.Button("Process File")

    # Outputs shared by both tabs.
    transcript_output = gr.Textbox(label="Transcription")
    table_output = gr.Dataframe(label="Extracted CRM Data")
    download_btn = gr.File(label="Download Excel")

    # Wire each tab's button to the same pipeline with its own audio source.
    btn1.click(
        fn=process_audio,
        inputs=mic_input,
        outputs=[transcript_output, table_output, download_btn]
    )

    btn2.click(
        fn=process_audio,
        inputs=file_input,
        outputs=[transcript_output, table_output, download_btn]
    )


app.launch()
|
|