import os
import tempfile

import requests
from dotenv import load_dotenv
from openai import OpenAI
from pydub import AudioSegment

load_dotenv()

HF_API_URL = os.getenv("HF_API_URL")
HF_API_KEY = os.getenv("HF_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

openai_client = OpenAI(api_key=OPENAI_API_KEY)


def transcribe_audio(file_path):
    """Transcribe an audio file with OpenAI Whisper.

    The audio is first resampled to 16 kHz mono, which keeps the upload small
    and gives the API a consistent input format.
    """
    try:
        audio = AudioSegment.from_file(file_path)
        audio = audio.set_frame_rate(16000).set_channels(1)

        # Export to a temporary WAV file; delete=False so it can be reopened by name.
        temp_wav = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        audio.export(temp_wav.name, format="wav")

        with open(temp_wav.name, "rb") as f:
            response = openai_client.audio.transcriptions.create(
                file=f,
                model="whisper-1",
            )

        os.remove(temp_wav.name)
        return response.text
    except Exception as e:
        return f"Error in transcription: {e}"


def generate_response(prompt, tone="advisor", model="OpenAI GPT-4o", stream=False):
    """Send the prompt to the selected model and return the generated text.

    Note: the `stream` flag is accepted for interface compatibility but is not
    used by any of the branches below.
    """
    try:
        if model == "OpenAI GPT-4o":
            completion = openai_client.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": f"You are an expert {tone} assistant."},
                    {"role": "user", "content": prompt},
                ],
            )
            return completion.choices[0].message.content.strip()

        elif model == "Claude 3 Opus":
            headers = {
                "x-api-key": os.getenv("CLAUDE_API_KEY"),
                "anthropic-version": "2023-06-01",
            }
            data = {
                "model": "claude-3-opus-20240229",
                "max_tokens": 1024,  # required by the Anthropic Messages API
                "messages": [{"role": "user", "content": prompt}],
            }
            response = requests.post(
                "https://api.anthropic.com/v1/messages",
                headers=headers,
                json=data,
            )
            response.raise_for_status()
            return response.json()["content"][0]["text"].strip()

        elif model == "Mistral-7B":
            # Hugging Face Inference API endpoint configured via HF_API_URL.
            headers = {"Authorization": f"Bearer {HF_API_KEY}"}
            data = {"inputs": prompt}
            response = requests.post(HF_API_URL, headers=headers, json=data)
            response.raise_for_status()
            return response.json()[0]["generated_text"].strip()

        else:
            return "⚠️ Invalid model selection."
    except Exception as e:
        return f"Error generating response: {e}"
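

# --- Usage sketch (not part of the original module) --------------------------
# A minimal example of how these helpers might be wired together, assuming the
# required keys (OPENAI_API_KEY, CLAUDE_API_KEY, HF_API_KEY, HF_API_URL) are set
# in the .env file. The input path "sample.wav" is hypothetical.
if __name__ == "__main__":
    transcript = transcribe_audio("sample.wav")  # hypothetical audio file
    print("Transcript:", transcript)

    # Route the transcript to GPT-4o using the default "advisor" tone.
    answer = generate_response(transcript, tone="advisor", model="OpenAI GPT-4o")
    print("Response:", answer)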