from fastapi import FastAPI, HTTPException, Request, Query
from fastapi.middleware.cors import CORSMiddleware
from fastapi_cache import FastAPICache
from fastapi_cache.backends.inmemory import InMemoryBackend
from fastapi_cache.decorator import cache
from pydantic import BaseModel
from typing import List, Dict, Any
from helper_functions_api import (
    has_tables,
    extract_data_from_tag,
    openrouter_response,
    md_to_html,
    search_brave,
    fetch_and_extract_content,
    limit_tokens,
    together_response,
    insert_data,
)
import os
from dotenv import load_dotenv, find_dotenv
from datetime import datetime, timedelta
import asyncio
import re

app = FastAPI()


@app.on_event("startup")
async def startup():
    FastAPICache.init(InMemoryBackend(), prefix="fastapi-cache")


# Load environment variables from .env file
# load_dotenv("keys.env")
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
BRAVE_API_KEY = os.getenv('BRAVE_API_KEY')
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HELICON_API_KEY = os.getenv("HELICON_API_KEY")
SUPABASE_USER = os.environ['SUPABASE_USER']
SUPABASE_PASSWORD = os.environ['SUPABASE_PASSWORD']
OPENROUTER_API_KEY = "sk-or-v1-" + os.environ['OPENROUTER_API_KEY']

# Groq model names
llm_default_small = "llama3-8b-8192"
llm_default_medium = "llama3-70b-8192"

# Together model names (fallback)
llm_fallback_small = "meta-llama/Llama-3-8b-chat-hf"
llm_fallback_medium = "meta-llama/Llama-3-70b-chat-hf"

SysPromptJson = "You are now in the role of an expert AI who can extract structured information from a user request. Both keys and values must be in double quotes. You must respond ONLY with valid JSON. Do not add any additional comments."
SysPromptList = "You are now in the role of an expert AI who can extract structured information from a user request. All elements must be in double quotes. You must respond ONLY with a valid Python list. Do not add any additional comments."
SysPromptDefault = "You are an expert AI, complete the given task. Do not add any additional comments."
SysPromptMd = "You are an expert AI who can create a structured report using information provided in the context of a user request. The report should be in markdown format and consist of markdown tables structured into subtopics. Do not add any additional comments."

sys_prompts = {
    "offline": {
        "Chat": "You are an expert AI, complete the given task. Do not add any additional comments.",
        "Full Text Report": "You are an expert AI who can create a detailed report from a user request. The report should be in markdown format. Do not add any additional comments.",
        "Tabular Report": "You are an expert AI who can create a structured report from a user request. The report should be in markdown format, structured into subtopics/tables/lists. Do not add any additional comments.",
        "Tables only": "You are an expert AI who can create a structured tabular report from a user request. The report should be in markdown format and consist of only markdown tables. Do not add any additional comments.",
    },
    "online": {
        "Chat": "You are an expert AI, complete the given task using the provided context. Do not add any additional comments.",
        "Full Text Report": "You are an expert AI who can create a detailed report using information scraped from the internet. You should decide which information is relevant to the given task and use it to create a report. The report should be in markdown format. Do not add any additional comments.",
        "Tabular Report": """You are an expert AI who can provide answers using internet search results.
1. Filter and summarize the relevant information; if there is conflicting information, use the latest source.
2. Use it to construct a clear and factual answer.
Your response should be properly formatted and easily readable using markdown formatting.""",
        "Tables only": "You are an expert AI who can create a structured tabular report using information scraped from the internet. You should decide which information is relevant to the given task. The report should be in markdown format and consist of only markdown tables. Do not add any additional comments.",
    },
}


class QueryModel(BaseModel):
    topic: str = Query(default="", description="input query to generate Report")
    description: str = Query(default="", description="additional context for report")
    user_id: str = Query(default="", description="unique user id")
    user_name: str = Query(default="", description="user name")
    internet: bool = Query(default=True, description="Enable Internet search")
    output_format: str = Query(
        default="Tabular Report",
        description="Output format for the report",
        enum=["Chat", "Full Text Report", "Tabular Report", "Tables only"],
    )
    data_format: str = Query(
        default="Structured data",
        description="Type of data to extract from the internet",
        enum=["No presets", "Structured data", "Quantitative data"],
    )
    generate_charts: bool = Query(default=False, description="Include generated charts")


# @cache(expire=604800)
async def generate_report(query: QueryModel):
    query_str = query.topic
    description = query.description
    user_id = query.user_id
    internet = "online" if query.internet else "offline"
    sys_prompt_output_format = sys_prompts[internet][query.output_format]
    data_format = query.data_format
    optimized_search_query = ""
    all_text_with_urls = [("", "")]

    if query.internet:
        # Strip punctuation from the description before using it as a web search query
        search_query = re.sub(r'[^\w\s]', '', description).strip()
        try:
            urls, optimized_search_query = await search_brave(search_query, num_results=8)
            all_text_with_urls = fetch_and_extract_content(data_format, urls, description)
            additional_context = limit_tokens(str(all_text_with_urls))
            prompt = f"Write a factual answer to the query: {description} #### Refer to the provided internet search results as reference: {additional_context}"
        except Exception as e:
            print(e)
            query.internet = False
            print("failed to search/scrape results, falling back to LLM response")

    if not query.internet:
        prompt = f"#### COMPLETE THE TASK: {description} #### IN THE CONTEXT OF ### CONTEXT: {query_str}"

    md_report = together_response(prompt, model=llm_default_medium, SysPrompt=sys_prompt_output_format)
    html_report = md_to_html(md_report)

    # Render charts only when requested and the report actually contains tables
    if query.generate_charts and has_tables(html_report):
        print("tables found, creating charts")
        try:
            prompt = (
                "convert the numerical data tables in the given content to embedded html plotly.js charts "
                "if appropriate, use appropriate colors. output format: output the full content without any "
                "other changes in md format enclosed in <report></report> tags, using the following:" + str(md_report)
            )
            messages = [{"role": "user", "content": prompt}]
            md_report_chart = extract_data_from_tag(
                openrouter_response(messages, model="anthropic/claude-3.5-sonnet"), "report"
            )
            html_report = md_to_html(md_report_chart)
        except Exception as e:
            print(e)
            print("failed to generate charts, falling back to original report")

    if user_id != "test":
        insert_data(user_id, query_str, description, str(all_text_with_urls), md_report)

    references_html = {}
    for text, url in all_text_with_urls:
        references_html[url] = str(md_to_html(text))

    return {
        "report": html_report,
        "references": references_html,
        "search_query": optimized_search_query,
    }
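
# Debugging sketch (not executed on import): generate_report can be exercised directly,
# without the HTTP layer, assuming the required API keys are present in the environment
# and helper_functions_api is importable. The topic/description values below are
# illustrative only; user_id="test" skips the Supabase insert above.
#
#   test_query = QueryModel(
#       topic="solid state batteries",
#       description="Summarize recent progress in solid state battery manufacturing",
#       user_id="test",
#       internet=True,
#   )
#   result = asyncio.run(generate_report(test_query))
#   print(result["search_query"])
#   print(result["report"])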
@app.post("/generate_report")
async def api_generate_report(request: Request, query: QueryModel):
    return await generate_report(query)


app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
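
# A minimal way to serve this API locally, assuming `uvicorn` is installed; the host and
# port below are illustrative defaults, not values required by this app.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against the running server (localhost:8000 is an assumption matching the
# sketch above; adjust to wherever the app is deployed). All QueryModel fields have
# defaults, so only the ones being set need to be sent:
#
#   curl -X POST http://localhost:8000/generate_report \
#        -H "Content-Type: application/json" \
#        -d '{"topic": "electric vehicles", "description": "Compare 2024 EV sales by maker", "user_id": "test", "output_format": "Tabular Report"}'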