Spaces:
Runtime error
Update main.py
main.py
CHANGED
@@ -1,182 +1,83 @@
 import os
-from typing import List, Dict, Any
-from datetime import datetime, timedelta
-import re
 from functools import lru_cache
 
-from fastapi import FastAPI, HTTPException, Request, Query, Depends
-from fastapi.middleware.cors import CORSMiddleware
-from pydantic import BaseModel, Field
-from fastapi_cache import FastAPICache
-from fastapi_cache.backends.inmemory import InMemoryBackend
-from fastapi_cache.decorator import cache
-from dotenv import load_dotenv
-
-from helper_functions_api import (
-    has_tables, extract_data_from_tag, openrouter_response, md_to_html,
-    search_brave, fetch_and_extract_content, limit_tokens, together_response, insert_data
-)
-
-# Load environment variables
-load_dotenv()
-
-# Constants
-LLM_MODELS = {
-    "default": {
-        "small": "llama3-8b-8192",
-        "medium": "llama3-70b-8192"
-    },
-    "fallback": {
-        "small": "meta-llama/Llama-3-8b-chat-hf",
-        "medium": "meta-llama/Llama-3-70b-chat-hf"
-    }
-}
-
-SYSTEM_PROMPTS = {
-    "json": "You are now in the role of an expert AI who can extract structured information from user request. Both key and value pairs must be in double quotes. You must respond ONLY with a valid JSON file. Do not add any additional comments.",
-    "list": "You are now in the role of an expert AI who can extract structured information from user request. All elements must be in double quotes. You must respond ONLY with a valid python List. Do not add any additional comments.",
-    "default": "You are an expert AI, complete the given task. Do not add any additional comments.",
-    "md": "You are an expert AI who can create a structured report using information provided in the context from user request. The report should be in markdown format consists of markdown tables structured into subtopics. Do not add any additional comments.",
-    "online": """You are an expert AI who can create a detailed structured report using internet search results.
-    1. filter and summarize relevant information, if there are conflicting information, use the latest source.
-    2. use it to construct a clear and factual answer.
-    Your response should be structured and properly formatted using markdown headings, subheadings, tables, use as necessary. Ignore Links and references""",
-    "offline": "You are an expert AI who can create detailed answers. Your response should be properly formatted and well readable using markdown formatting."
-}
-
-# Prompt templates
-PROMPT_TEMPLATES = {
-    "online": {
-        "chat": "Write a well thought out, detailed and structured answer to the query:: {description} #### , refer the provided internet search results reference:{reference}",
-        "report": "Write a well thought out, detailed and structured Report to the query:: {description} #### , refer the provided internet search results reference:{reference}, The report should be well formatted using markdown format structured into subtopics as necessary",
-        "report_table": "Write a well thought out Report to the query:: {description},#### , refer the provided internet search results reference:{reference}. The report should be well formatted using markdown format, structured into subtopics, include tables or lists as needed to make it well readable"
-    },
-    "offline": {
-        "chat": "Write a well thought out, detailed and structured answer to the query:: {description}",
-        "report": "Write a well thought out, detailed and structured Report to the query:: {description}. The report should be well formatted using markdown format, structured into subtopics",
-        "report_table": "Write a detailed and structured Report to the query:: {description}, The report should be well formatted using markdown format, structured into subtopics, include tables or lists as needed to make it well readable"
-    }
-}
-
-# FastAPI app setup
 app = FastAPI()
 
-
-
-
 
-# Pydantic model for query parameters
 class QueryModel(BaseModel):
-    user_query: str = Field(
-    internet: bool = Field(default=True, description="Enable Internet search")
-    output_format: str = Field(default="report_table", description="Output format for the report")
-    data_format: str = Field(default="Structured data", description="Type of data to extract from the internet")
-    generate_charts: bool = Field(default=False, description="Include generated charts")
-    output_as_md: bool = Field(default=False, description="Output report in markdown (default output in HTML)")
 
     class Config:
         schema_extra = {
             "example": {
-                "user_query": "How
-                "
-                "description": "Provide a detailed report on the impacts of climate change on global biodiversity",
-                "user_id": "user123",
-                "user_name": "John Doe",
-                "internet": True,
-                "output_format": "report_table",
-                "data_format": "Structured data",
-                "generate_charts": True,
-                "output_as_md": False
             }
         }
 
 @lru_cache()
 def get_api_keys():
     return {
-        "TOGETHER_API_KEY": os.getenv('TOGETHER_API_KEY'),
-        "BRAVE_API_KEY": os.getenv('BRAVE_API_KEY'),
-        "GROQ_API_KEY": os.getenv("GROQ_API_KEY"),
-        "HELICON_API_KEY": os.getenv("HELICON_API_KEY"),
-        "SUPABASE_USER": os.environ['SUPABASE_USER'],
-        "SUPABASE_PASSWORD": os.environ['SUPABASE_PASSWORD'],
        "OPENROUTER_API_KEY": f"sk-or-v1-{os.environ['OPENROUTER_API_KEY']}"
     }
 
-
-def get_internet_data(search_query, data_format):
-    urls, optimized_search_query, full_search_object = search_brave(search_query, num_results=8)
-    all_text_with_urls = fetch_and_extract_content(data_format, urls, optimized_search_query)
-    reference = limit_tokens(str(all_text_with_urls), token_limit=5000)
-    return all_text_with_urls, optimized_search_query, full_search_object, reference
 
-def generate_charts(md_report):
-    chart_prompt = (
-        "Convert the numerical data tables in the given content to embedded html plotly.js charts if appropriate, "
-        "use appropriate colors. Output format: <report>output the full content without any other changes in md "
-        f"format enclosed in tags like this</report> using the following: {md_report}"
-    )
-    messages = [{"role": 'user', "content": chart_prompt}]
-    return extract_data_from_tag(openrouter_response(messages, model="anthropic/claude-3.5-sonnet"), "report")
-
-@cache(expire=604800)
-async def generate_report(query: QueryModel, api_keys: Dict[str, str] = Depends(get_api_keys)):
-    internet_mode = "online" if query.internet else "offline"
-    user_prompt = PROMPT_TEMPLATES[internet_mode][query.output_format]
-    system_prompt = SYSTEM_PROMPTS[internet_mode]
-
-    all_text_with_urls = []
-    optimized_search_query = ""
-    full_search_object = {}
-
-    if query.internet:
-        try:
-            all_text_with_urls, optimized_search_query, full_search_object, reference = get_internet_data(query.description, query.data_format)
-            user_prompt = user_prompt.format(description=query.description, reference=reference)
-        except Exception as e:
-            print(f"Failed to search/scrape results: {e}")
-            internet_mode = "offline"
-            user_prompt = PROMPT_TEMPLATES[internet_mode][query.output_format].format(description=query.description)
-            system_prompt = SYSTEM_PROMPTS[internet_mode]
-    else:
-        user_prompt = user_prompt.format(description=query.description)
-
-    md_report = together_response(user_prompt, model=LLM_MODELS["default"]["medium"], SysPrompt=system_prompt)
-
-    if query.generate_charts and has_tables(md_to_html(md_report)):
-        try:
-            md_report = generate_charts(md_report)
-        except Exception as e:
-            print(f"Failed to generate charts: {e}")
-
-    if query.user_id != "test":
-        insert_data(query.user_id, query.topic, query.description, str(all_text_with_urls), md_report)
-
-    references_html = {url: str(md_to_html(text)) for text, url in all_text_with_urls}
-    final_report = md_report if query.output_as_md else md_to_html(md_report)
-
-    return {
-        "report": final_report,
-        "references": references_html,
-        "search_query": optimized_search_query,
-        "search_data_full": full_search_object
-    }
-
-@app.post("/generate_report", response_model=Dict[str, Any])
-async def api_generate_report(query: QueryModel, api_keys: Dict[str, str] = Depends(get_api_keys)):
     try:
-        return await generate_report(query, api_keys)
     except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
+from fastapi import FastAPI, HTTPException
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, Field
+from typing import Literal
 import os
 from functools import lru_cache
+from openai import OpenAI
 
 app = FastAPI()
 
+ModelID = Literal[
+    "meta-llama/llama-3-70b-instruct",
+    "anthropic/claude-3.5-sonnet",
+    "deepseek/deepseek-coder",
+    "anthropic/claude-3-haiku",
+    "openai/gpt-3.5-turbo-instruct",
+    "qwen/qwen-72b-chat",
+    "google/gemma-2-27b-it"
+]
 
 class QueryModel(BaseModel):
+    user_query: str = Field(..., description="User's coding query")
+    model_id: ModelID = Field(
+        default="meta-llama/llama-3-70b-instruct",
+        description="ID of the model to use for response generation"
+    )
 
     class Config:
         schema_extra = {
             "example": {
+                "user_query": "How do I implement a binary search in Python?",
+                "model_id": "meta-llama/llama-3-70b-instruct"
             }
         }
 
 @lru_cache()
 def get_api_keys():
     return {
         "OPENROUTER_API_KEY": f"sk-or-v1-{os.environ['OPENROUTER_API_KEY']}"
     }
 
+api_keys = get_api_keys()
+or_client = OpenAI(api_key=api_keys["OPENROUTER_API_KEY"], base_url="https://openrouter.ai/api/v1")
 
+@lru_cache()
+def chat_with_llama_stream(messages, model, max_output_tokens=4000):
     try:
+        response = or_client.chat.completions.create(
+            model=model,
+            messages=messages,
+            max_tokens=max_output_tokens,
+            stream=True
+        )
+        for chunk in response:
+            if chunk.choices[0].delta.content is not None:
+                yield chunk.choices[0].delta.content
     except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Error in model response: {str(e)}")
+
+@app.post("/coding-assistant")
+async def coding_assistant(query: QueryModel):
+    """
+    Coding assistant endpoint that provides programming help based on user queries.
+
+    Available models:
+    - meta-llama/llama-3-70b-instruct (default)
+    - anthropic/claude-3.5-sonnet
+    - deepseek/deepseek-coder
+    - anthropic/claude-3-haiku
+    - openai/gpt-3.5-turbo-instruct
+    - qwen/qwen-72b-chat
+    - google/gemma-2-27b-it
+    """
+    system_prompt = "You are a helpful assistant proficient in coding tasks. Help the user in understanding and writing code."
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": query.user_query}
+    ]
+
+    return StreamingResponse(
+        chat_with_llama_stream(messages, model=query.model_id),
+        media_type="text/event-stream"
+    )
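
A quick way to exercise the new endpoint is a small streaming client. This is a minimal sketch, not part of the commit: it assumes the app is served locally (for example via uvicorn main:app --port 8000; host and port are assumptions) and that the requests package is installed.

import requests

payload = {
    "user_query": "How do I implement a binary search in Python?",
    "model_id": "meta-llama/llama-3-70b-instruct",
}

# stream=True lets us print each token as the server yields it
with requests.post("http://localhost:8000/coding-assistant", json=payload, stream=True) as resp:
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)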
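
Because model_id is typed with the ModelID Literal, FastAPI rejects any other value with a 422 validation error before the handler runs. A minimal sketch of the same check applied directly to the model (assuming Pydantic v1, which the schema_extra Config implies; the model_id value is deliberately invalid):

from pydantic import ValidationError

try:
    QueryModel(user_query="hi", model_id="not-a-real-model")
except ValidationError as err:
    print(err)  # names the permitted model ids from the Literal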
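
One caveat in the added code, and possibly related to the Space's "Runtime error" status: @lru_cache() on chat_with_llama_stream cannot work as written. lru_cache builds its cache key by hashing the arguments, and messages is a list, so the first request raises TypeError: unhashable type: 'list'; even with hashable arguments, a cached generator would be handed back already exhausted on a repeat call. A minimal corrected sketch simply drops the decorator:

def chat_with_llama_stream(messages, model, max_output_tokens=4000):
    # No lru_cache: lists are unhashable and generators are single-use.
    try:
        response = or_client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_output_tokens,
            stream=True
        )
        for chunk in response:
            if chunk.choices[0].delta.content is not None:
                yield chunk.choices[0].delta.content
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error in model response: {str(e)}")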