from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Dict, Any
from helper_functions_api import md_to_html, search_brave, fetch_and_extract_content, limit_tokens, together_response, insert_data
import os
from dotenv import load_dotenv

# Environment variables are expected to be provided by the hosting environment;
# uncomment the line below to load them from a local keys.env file instead.
# load_dotenv("keys.env")
app = FastAPI()
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
BRAVE_API_KEY = os.getenv('BRAVE_API_KEY')
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
HELICON_API_KEY = os.getenv("HELICON_API_KEY")
SUPABASE_USER = os.environ['SUPABASE_USER']
SUPABASE_PASSWORD = os.environ['SUPABASE_PASSWORD']
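
# Example keys.env for local development (a sketch with placeholder values only;
# the real keys are supplied by the hosting environment or a local file):
#   TOGETHER_API_KEY=...
#   BRAVE_API_KEY=...
#   GROQ_API_KEY=...
#   HELICON_API_KEY=...
#   SUPABASE_USER=...
#   SUPABASE_PASSWORD=...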
llm_default_small = "llama3-8b-8192"
llm_default_medium = "llama3-70b-8192"
SysPromptJson = "You are now in the role of an expert AI who can extract structured information from a user request. Both keys and values must be in double quotes. You must respond ONLY with a valid JSON file. Do not add any additional comments."
SysPromptList = "You are now in the role of an expert AI who can extract structured information from a user request. All elements must be in double quotes. You must respond ONLY with a valid Python list. Do not add any additional comments."
SysPromptDefault = "You are an expert AI, complete the given task. Do not add any additional comments."
SysPromptMd = "You are an expert AI who can create a structured report using information provided in the context from a user request. The report should be in markdown format and consist of markdown tables structured into subtopics. Do not add any additional comments."
class ReportQuery(BaseModel):
    query: str = Field(default="market research", description="input query to generate the report")
    description: str = Field(default="", description="additional context for the report")
    user_id: str = Field(default="", description="unique user id")
    user_name: str = Field(default="", description="user name")
@app.post("/generate_report")
async def generate_report(request: Request, query: Query):
query_str = query.query
description = query.description
user_id = query.user_id
# Combine query with user keywords
search_query = query_str
# Search for relevant URLs
urls = search_brave(search_query, num_results=4)
# Fetch and extract content from the URLs
all_text_with_urls = fetch_and_extract_content(urls, query_str)
# Prepare the prompt for generating the report
additional_context = limit_tokens(str(all_text_with_urls))
prompt = f"#### ADDITIONAL CONTEXT:{additional_context} #### CREATE A DETAILED REPORT FOR THE QUERY:{query_str} #### IN THE CONTEXT OF ### CONTEXT: {description}"
md_report = together_response(prompt, model=llm_default_medium, SysPrompt=SysPromptMd)
# Insert data into database (or other storage)
insert_data(user_id, query_str, description, str(all_text_with_urls), md_report)
references_html = dict()
for text, url in all_text_with_urls:
references_html[url] = str(md_to_html(text))
# Return the generated report
return {
"report": md_to_html(md_report),
"references": references_html
}
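
# Example request (illustrative sketch only; host, port, and payload values are assumed,
# not part of the deployed app):
#   curl -X POST http://localhost:8000/generate_report \
#        -H "Content-Type: application/json" \
#        -d '{"query": "market research", "description": "EV batteries", "user_id": "u1", "user_name": "demo"}'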
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
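
# Optional local entry point: a minimal sketch for running the API during development.
# Assumes uvicorn is installed; host and port below are illustrative choices.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)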