# File: prompts.py
DOCUMENT_OUTLINE_PROMPT_SYSTEM = """You are a document generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating content for that particular section or subsection.
Make sure the sections follow a logical flow and that no prompt's content overlaps with other sections.
OUTPUT IN THE FOLLOWING JSON FORMAT enclosed in <output> tags:
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",
    "Sections": [
      {
        "SectionNumber": "1",
        "Title": "Section Title",
        "Content": "Specific prompt or instruction for generating content for this section",
        "Subsections": [
          {
            "SectionNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating content for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""
DOCUMENT_OUTLINE_PROMPT_USER = """<prompt>{query}</prompt>"""
DOCUMENT_SECTION_PROMPT_SYSTEM = """You are a document generator. Output only the content requested for the section given in the prompt.
FORMAT YOUR OUTPUT AS MARKDOWN ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""
DOCUMENT_SECTION_PROMPT_USER = """<prompt>Output the content for the section "{section_or_subsection_title}" formatted as markdown. Follow this instruction: {content_instruction}</prompt>"""
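# Illustrative sketch (not used by the app directly): how the outline templates
# above combine into a chat messages list. The query string is a placeholder.
def _example_outline_messages(query: str = "Write a project charter"):
    return [
        {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM},
        {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query)},
    ]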
##########################################
DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM = """You are a document template generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating a template with placeholder text/example content for that particular section or subsection. Specify in each prompt that the output should be a template, using placeholder text and tables as necessary.
Make sure the sections follow a logical flow and that no prompt's content overlaps with other sections.
OUTPUT IN THE FOLLOWING JSON FORMAT enclosed in <output> tags:
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",
    "Sections": [
      {
        "SectionNumber": "1",
        "Title": "Section Title",
        "Content": "Specific prompt or instruction for generating the template for this section",
        "Subsections": [
          {
            "SectionNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating the template for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""
DOCUMENT_TEMPLATE_PROMPT_USER = """<prompt>{query}</prompt>"""
DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM = """You are a document template generator. Output only the content requested for the section given in the prompt, using placeholder text/examples/tables wherever required.
FORMAT YOUR OUTPUT AS A TEMPLATE ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""
DOCUMENT_TEMPLATE_SECTION_PROMPT_USER = """<prompt>Output the content for the section "{section_or_subsection_title}" formatted as markdown. Follow this instruction: {content_instruction}</prompt>"""
# File: llm_observability.py
import sqlite3
import json
from datetime import datetime
from typing import Dict, Any, List, Optional
class LLMObservabilityManager:
    def __init__(self, db_path: str = "llm_observability_v2.db"):
        self.db_path = db_path
        self.create_table()

    def create_table(self):
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS llm_observations (
                    id TEXT PRIMARY KEY,
                    conversation_id TEXT,
                    created_at DATETIME,
                    status TEXT,
                    request TEXT,
                    response TEXT,
                    model TEXT,
                    total_tokens INTEGER,
                    prompt_tokens INTEGER,
                    completion_tokens INTEGER,
                    latency FLOAT,
                    user TEXT
                )
            ''')

    def insert_observation(self, response: Dict[str, Any], conversation_id: str, status: str, request: str, latency: float, user: str):
        created_at = datetime.fromtimestamp(response['created'])
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO llm_observations
                (id, conversation_id, created_at, status, request, response, model, total_tokens, prompt_tokens, completion_tokens, latency, user)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                response['id'],
                conversation_id,
                created_at,
                status,
                request,
                json.dumps(response['choices'][0]['message']),
                response['model'],
                response['usage']['total_tokens'],
                response['usage']['prompt_tokens'],
                response['usage']['completion_tokens'],
                latency,
                user
            ))

    def get_observations(self, conversation_id: Optional[str] = None) -> List[Dict[str, Any]]:
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            if conversation_id:
                cursor.execute('SELECT * FROM llm_observations WHERE conversation_id = ? ORDER BY created_at', (conversation_id,))
            else:
                cursor.execute('SELECT * FROM llm_observations ORDER BY created_at')
            rows = cursor.fetchall()
            column_names = [description[0] for description in cursor.description]
            return [dict(zip(column_names, row)) for row in rows]

    def get_all_observations(self) -> List[Dict[str, Any]]:
        return self.get_observations()
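# Usage sketch (illustrative only): logging one hand-made OpenAI-style response
# dict and reading it back. The db path and all field values are placeholders
# for this example, not real API output.
def _example_observability_roundtrip():
    manager = LLMObservabilityManager(db_path="example_observability.db")
    fake_response = {
        "id": "chatcmpl-example",
        "created": 1700000000,
        "model": "openai/gpt-4o-mini",
        "choices": [{"message": {"role": "assistant", "content": "Hello"}}],
        "usage": {"total_tokens": 12, "prompt_tokens": 8, "completion_tokens": 4},
    }
    manager.insert_observation(
        response=fake_response,
        conversation_id="demo",
        status="success",
        request='[{"role": "user", "content": "Hi"}]',
        latency=0.42,
        user="example_user",
    )
    return manager.get_observations("demo")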
# File: app.py
import os
import json
import re
import asyncio
import time
from typing import List, Dict, Optional, Any, Callable, Union
from openai import OpenAI
import logging
import functools
from fastapi import APIRouter, HTTPException, Request, UploadFile, File, Depends, Form
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from fastapi_cache import FastAPICache
from fastapi_cache.decorator import cache
import psycopg2
from datetime import datetime
import base64
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def log_execution(func: Callable) -> Callable:
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        logger.info(f"Executing {func.__name__}")
        try:
            result = func(*args, **kwargs)
            logger.info(f"{func.__name__} completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {e}")
            raise
    return wrapper
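# Illustrative use of the decorator on a trivial function: calling
# _example_add(1, 2) logs entry and successful completion around the call.
@log_execution
def _example_add(a: int, b: int) -> int:
    return a + b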
# aiclient.py
class AIClient:
    def __init__(self):
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key="sk-or-v1-" + os.environ['OPENROUTER_API_KEY']
        )
        self.observability_manager = LLMObservabilityManager()

    @log_execution
    def generate_response(
        self,
        messages: List[Dict[str, str]],
        model: str = "openai/gpt-4o-mini",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None
        start_time = time.time()
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=False
        )
        end_time = time.time()
        latency = end_time - start_time
        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )
        return response.choices[0].message.content

    @log_execution
    def generate_vision_response(
        self,
        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
        model: str = "google/gemini-flash-1.5-8b",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None
        start_time = time.time()
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=False
        )
        end_time = time.time()
        latency = end_time - start_time
        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )
        return response.choices[0].message.content
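# Usage sketch (assumes OPENROUTER_API_KEY is set in the environment; the
# prompt and conversation id below are placeholders). This performs a real
# network call through OpenRouter, so it is illustrative rather than part of
# the app.
def _example_generate_response() -> Optional[str]:
    client = AIClient()
    return client.generate_response(
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        model="openai/gpt-4o-mini",
        conversation_id="example-conversation",
        user="example_user",
    )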
class VisionTools:
    def __init__(self, ai_client):
        self.ai_client = ai_client

    async def extract_images_info(self, images: List[UploadFile]) -> str:
        try:
            image_contents = []
            for image in images:
                image_content = await image.read()
                base64_image = base64.b64encode(image_content).decode('utf-8')
                image_contents.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                })
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Extract the contents of these images in detail in a structured format, focusing on any text, tables, diagrams, or visual elements that might be relevant for document generation."
                        },
                        *image_contents
                    ]
                }
            ]
            image_context = self.ai_client.generate_vision_response(messages)
            return image_context
        except Exception as e:
            logger.error(f"Error processing images: {e}")
            return ""
class DatabaseManager:
    """Manages database operations."""

    def __init__(self):
        self.db_params = {
            "dbname": "postgres",
            "user": os.environ['SUPABASE_USER'],
            "password": os.environ['SUPABASE_PASSWORD'],
            "host": "aws-0-us-west-1.pooler.supabase.com",
            "port": "5432"
        }

    @log_execution
    def update_database(self, user_id: str, user_query: str, response: str) -> None:
        with psycopg2.connect(**self.db_params) as conn:
            with conn.cursor() as cur:
                insert_query = """
                    INSERT INTO ai_document_generator (user_id, user_query, response)
                    VALUES (%s, %s, %s);
                """
                cur.execute(insert_query, (user_id, user_query, response))
class DocumentGenerator:
    def __init__(self, ai_client: AIClient):
        self.ai_client = ai_client
        self.document_outline = None
        self.content_messages = []

    @staticmethod
    def extract_between_tags(text: str, tag: str) -> str:
        pattern = f"<{tag}>(.*?)</{tag}>"
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1).strip() if match else ""

    @staticmethod
    def remove_duplicate_title(content: str, title: str, section_number: str) -> str:
        patterns = [
            rf"^#+\s*{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^#+\s*{re.escape(title)}",
            rf"^{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^{re.escape(title)}",
        ]
        for pattern in patterns:
            content = re.sub(pattern, "", content, flags=re.MULTILINE | re.IGNORECASE)
        return content.lstrip()

    @log_execution
    def generate_document_outline(self, query: str, template: bool = False, image_context: str = "", max_retries: int = 3) -> Optional[Dict]:
        messages = [
            {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM},
            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query)}
        ]
        # Append the image context to the user message if provided
        if image_context:
            messages[1]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"
        for attempt in range(max_retries):
            outline_response = self.ai_client.generate_response(messages, model="openai/gpt-4o")
            outline_json_text = self.extract_between_tags(outline_response, "output")
            try:
                self.document_outline = json.loads(outline_json_text)
                return self.document_outline
            except json.JSONDecodeError as e:
                if attempt < max_retries - 1:
                    logger.warning(f"Failed to parse JSON (attempt {attempt + 1}): {e}")
                    logger.info("Retrying...")
                else:
                    logger.error(f"Failed to parse JSON after {max_retries} attempts: {e}")
                    return None

    @log_execution
    def generate_content(self, title: str, content_instruction: str, section_number: str, template: bool = False) -> str:
        SECTION_PROMPT_USER = DOCUMENT_SECTION_PROMPT_USER if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_USER
        self.content_messages.append({
            "role": "user",
            "content": SECTION_PROMPT_USER.format(
                section_or_subsection_title=title,
                content_instruction=content_instruction
            )
        })
        section_response = self.ai_client.generate_response(self.content_messages)
        content = self.extract_between_tags(section_response, "response")
        content = self.remove_duplicate_title(content, title, section_number)
        self.content_messages.append({
            "role": "assistant",
            "content": section_response
        })
        return content
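# Illustrative check of the title-deduplication helper above: a generated
# section that restates its own heading is reduced to just the body text.
def _example_remove_duplicate_title() -> str:
    content = "## 1. Introduction\nBody text"
    # returns "Body text"
    return DocumentGenerator.remove_duplicate_title(content, "Introduction", "1")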
class MarkdownConverter:
    @staticmethod
    def slugify(text: str) -> str:
        return re.sub(r'\W+', '-', text.lower())

    @classmethod
    def generate_toc(cls, sections: List[Dict]) -> str:
        toc = "<div style='page-break-before: always;'></div>\n\n"
        toc += "<h2 style='color: #2c3e50; text-align: center;'>Table of Contents</h2>\n\n"
        toc += "<nav style='background-color: #f8f9fa; padding: 20px; border-radius: 5px; line-height: 1.6;'>\n\n"
        for section in sections:
            section_number = section['SectionNumber']
            section_title = section['Title']
            toc += f"<p><a href='#{cls.slugify(section_title)}' style='color: #3498db; text-decoration: none;'>{section_number}. {section_title}</a></p>\n\n"
            for subsection in section.get('Subsections', []):
                subsection_number = subsection['SectionNumber']
                subsection_title = subsection['Title']
                toc += f"<p style='margin-left: 20px;'><a href='#{cls.slugify(subsection_title)}' style='color: #2980b9; text-decoration: none;'>{subsection_number} {subsection_title}</a></p>\n\n"
        toc += "</nav>\n\n"
        return toc

    @classmethod
    def convert_to_markdown(cls, document: Dict) -> str:
        markdown = "<div style='text-align: center; padding-top: 33vh;'>\n\n"
        markdown += f"<h1 style='color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px; display: inline-block;'>{document['Title']}</h1>\n\n"
        markdown += f"<p style='color: #7f8c8d;'><em>By {document['Author']}</em></p>\n\n"
        markdown += f"<p style='color: #95a5a6;'>Version {document['Version']} | {document['Date']}</p>\n\n"
        markdown += "</div>\n\n"
        markdown += cls.generate_toc(document['Sections'])
        markdown += "<div style='max-width: 800px; margin: 0 auto; font-family: \"Segoe UI\", Arial, sans-serif; line-height: 1.6;'>\n\n"
        for section in document['Sections']:
            markdown += "<div style='page-break-before: always;'></div>\n\n"
            section_number = section['SectionNumber']
            section_title = section['Title']
            markdown += f"<h2 id='{cls.slugify(section_title)}' style='color: #2c3e50; border-bottom: 1px solid #bdc3c7; padding-bottom: 5px;'>{section_number}. {section_title}</h2>\n\n"
            markdown += f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{section['Content']}\n\n</div>\n\n"
            for subsection in section.get('Subsections', []):
                subsection_number = subsection['SectionNumber']
                subsection_title = subsection['Title']
                markdown += f"<h3 id='{cls.slugify(subsection_title)}' style='color: #34495e;'>{subsection_number} {subsection_title}</h3>\n\n"
                markdown += f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{subsection['Content']}\n\n</div>\n\n"
        markdown += "</div>"
        return markdown
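# Minimal sketch of the converter's expected input shape; all field values are
# placeholders. Running this returns a styled markdown string with a title
# page, table of contents, and one section.
def _example_convert_minimal_document() -> str:
    document = {
        "Title": "Example Document",
        "Author": "Example Author",
        "Date": "2024-01-01",
        "Version": "1.0",
        "Sections": [
            {
                "SectionNumber": "1",
                "Title": "Introduction",
                "Content": "Placeholder section content.",
                "Subsections": [],
            }
        ],
    }
    return MarkdownConverter.convert_to_markdown(document)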
router = APIRouter()
class JsonDocumentResponse(BaseModel):
    json_document: Dict

# class JsonDocumentRequest(BaseModel):
#     query: str
#     template: bool = False
#     images: Optional[List[UploadFile]] = File(None)
#     documents: Optional[List[UploadFile]] = File(None)
#     conversation_id: str = ""

class MarkdownDocumentRequest(BaseModel):
    json_document: Dict
    query: str
    template: bool = False
    conversation_id: str = ""
MESSAGE_DELIMITER = b"\n---DELIMITER---\n"
def yield_message(message):
    message_json = json.dumps(message, ensure_ascii=False).encode('utf-8')
    return message_json + MESSAGE_DELIMITER
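# Framing sketch: each message produced by yield_message is a JSON object
# followed by MESSAGE_DELIMITER, so a consumer can recover the messages by
# splitting on the delimiter. Illustrative helper, not used by the endpoints.
def _example_parse_delimited_stream(raw: bytes) -> List[Dict]:
    messages = []
    for frame in raw.split(MESSAGE_DELIMITER):
        if frame.strip():
            messages.append(json.loads(frame.decode('utf-8')))
    return messages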
async def generate_document_stream(document_generator: DocumentGenerator, document_outline: Dict, query: str, template: bool = False, conversation_id: str = ""):
    document_generator.document_outline = document_outline
    db_manager = DatabaseManager()
    overall_objective = query
    document_layout = json.dumps(document_generator.document_outline, indent=2)
    cache_key = f"image_context_{conversation_id}"
    image_context = await FastAPICache.get_backend().get(cache_key)
    SECTION_PROMPT_SYSTEM = DOCUMENT_SECTION_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM
    document_generator.content_messages = [
        {
            "role": "system",
            "content": SECTION_PROMPT_SYSTEM.format(
                overall_objective=overall_objective,
                document_layout=document_layout
            )
        }
    ]
    if image_context:
        document_generator.content_messages[0]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"
    for section in document_generator.document_outline["Document"].get("Sections", []):
        section_title = section.get("Title", "")
        section_number = section.get("SectionNumber", "")
        content_instruction = section.get("Content", "")
        logging.info(f"Generating content for section: {section_title}")
        content = document_generator.generate_content(section_title, content_instruction, section_number, template)
        section["Content"] = content
        yield yield_message({
            "type": "document_section",
            "content": {
                "section_number": section_number,
                "section_title": section_title,
                "content": content
            }
        })
        for subsection in section.get("Subsections", []):
            subsection_title = subsection.get("Title", "")
            subsection_number = subsection.get("SectionNumber", "")
            subsection_content_instruction = subsection.get("Content", "")
            logging.info(f"Generating content for subsection: {subsection_title}")
            content = document_generator.generate_content(subsection_title, subsection_content_instruction, subsection_number, template)
            subsection["Content"] = content
            yield yield_message({
                "type": "document_section",
                "content": {
                    "section_number": subsection_number,
                    "section_title": subsection_title,
                    "content": content
                }
            })
    markdown_document = MarkdownConverter.convert_to_markdown(document_generator.document_outline["Document"])
    yield yield_message({
        "type": "complete_document",
        "content": {
            "markdown": markdown_document,
            "json": document_generator.document_outline
        },
    })
    db_manager.update_database("elevatics", query, markdown_document)
@router.post("/generate-document/markdown-stream")
async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRequest):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)

    async def stream_generator():
        try:
            async for chunk in generate_document_stream(document_generator, request.json_document, request.query, request.template, request.conversation_id):
                yield chunk
        except Exception as e:
            yield yield_message({
                "type": "error",
                "content": str(e)
            })

    return StreamingResponse(stream_generator(), media_type="application/octet-stream")
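# Client-side sketch for the streaming endpoint above (illustrative; `httpx`
# and the localhost base URL are assumptions and depend on where this router
# is mounted). Bytes are buffered and split on MESSAGE_DELIMITER, matching the
# framing produced by yield_message.
async def _example_consume_markdown_stream(payload: Dict) -> List[Dict]:
    import httpx
    frames: List[Dict] = []
    buffer = b""
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "POST",
            "http://localhost:8000/generate-document/markdown-stream",
            json=payload,
        ) as response:
            async for chunk in response.aiter_bytes():
                buffer += chunk
                while MESSAGE_DELIMITER in buffer:
                    frame, buffer = buffer.split(MESSAGE_DELIMITER, 1)
                    if frame.strip():
                        frames.append(json.loads(frame.decode("utf-8")))
    return frames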
@router.post("/generate-document/json", response_model=JsonDocumentResponse)
@cache(expire=600*24*7)  # ~28 hours; placed below the route decorator so caching takes effect
async def generate_document_outline_endpoint(
    query: str = Form(...),
    template: bool = Form(False),
    conversation_id: str = Form(...),
    images: Optional[List[UploadFile]] = File(None),
    documents: Optional[List[UploadFile]] = File(None)
):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)
    vision_tools = VisionTools(ai_client)
    try:
        image_context = ""
        if images:
            image_context = await vision_tools.extract_images_info(images)
        # Store the image_context in the cache
        cache_key = f"image_context_{conversation_id}"
        await FastAPICache.get_backend().set(cache_key, image_context, expire=3600)  # Cache for 1 hour
        json_document = document_generator.generate_document_outline(
            query,
            template,
            image_context=image_context
        )
        if json_document is None:
            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")
        return JsonDocumentResponse(json_document=json_document)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
## OBSERVABILITY
from uuid import uuid4
import csv
from io import StringIO
class ObservationResponse(BaseModel):
    observations: List[Dict]

def create_csv_response(observations: List[Dict]) -> StreamingResponse:
    def iter_csv(data):
        output = StringIO()
        writer = csv.DictWriter(output, fieldnames=data[0].keys() if data else [])
        writer.writeheader()
        for row in data:
            writer.writerow(row)
        output.seek(0)
        yield output.read()

    headers = {
        'Content-Disposition': 'attachment; filename="observations.csv"'
    }
    return StreamingResponse(iter_csv(observations), media_type="text/csv", headers=headers)
@router.get("/last-observations/{limit}")
async def get_last_observations(limit: int = 10, format: str = "json"):
observability_manager = LLMObservabilityManager()
try:
# Get all observations, sorted by created_at in descending order
all_observations = observability_manager.get_observations()
all_observations.sort(key=lambda x: x['created_at'], reverse=True)
# Get the last conversation_id
if all_observations:
last_conversation_id = all_observations[0]['conversation_id']
# Filter observations for the last conversation
last_conversation_observations = [
obs for obs in all_observations
if obs['conversation_id'] == last_conversation_id
][:limit]
if format.lower() == "csv":
return create_csv_response(last_conversation_observations)
else:
return ObservationResponse(observations=last_conversation_observations)
else:
if format.lower() == "csv":
return create_csv_response([])
else:
return ObservationResponse(observations=[])
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to retrieve observations: {str(e)}")
## TEST CACHE
class CacheItem(BaseModel):
    key: str
    value: str

@router.post("/set-cache")
async def set_cache(item: CacheItem):
    try:
        # Set the cache with a default expiration of 1 hour (3600 seconds)
        await FastAPICache.get_backend().set(item.key, item.value, expire=3600)
        return {"message": f"Cache set for key: {item.key}"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to set cache: {str(e)}")

@router.get("/get-cache/{key}")
async def get_cache(key: str):
    try:
        value = await FastAPICache.get_backend().get(key)
        if value is None:
            raise HTTPException(status_code=404, detail=f"No cache found for key: {key}")
        return {"key": key, "value": value}
    except HTTPException:
        # Re-raise the 404 as-is instead of wrapping it in a 500
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get cache: {str(e)}")