pvanand committed
Commit: 5a0520f
Parent: c7ec934

Create document_generator_v4.py

Files changed (1): document_generator_v4.py (+657 -0)
document_generator_v4.py ADDED
@@ -0,0 +1,657 @@
# File: prompts.py

DOCUMENT_OUTLINE_PROMPT_SYSTEM = """You are a document generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating content for that particular section or subsection.
Make sure the Pages follow a logical flow and each prompt's content does not overlap with other pages.
OUTPUT IN FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",

    "Pages": [
      {
        "PageNumber": "1",
        "Title": "Section Title",
        "Content": "overview", # Optional: Short overview of the Section, if not required leave it as "" empty string
        "Subsections": [
          {
            "PageNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating content for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""

DOCUMENT_OUTLINE_PROMPT_USER = """Generate a document outline {num_pages}for the following query: <prompt>{query}</prompt>"""

DOCUMENT_SECTION_PROMPT_SYSTEM = """You are a document generator. Replace the section/subsection prompts with the requested content.
OUTPUT AS A WELL FORMATTED DOCUMENT ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

DOCUMENT_SECTION_PROMPT_USER = """<prompt>Output the content requested formatted as markdown. Follow the instructions below title/subtitle to replace it with appropriate content: {content_instruction}</prompt>"""

##########################################

DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM = """You are a document template generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating a template with placeholder text/example content for that particular section or subsection. Specify in each prompt to output as a template and use placeholder text/tables as necessary.
Make sure the Sections follow a logical flow and each prompt's content does not overlap with other sections.
OUTPUT IN FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
  "Document": {
    "Title": "Document Title",
    "Author": "Author Name",
    "Date": "YYYY-MM-DD",
    "Version": "1.0",

    "Pages": [
      {
        "PageNumber": "1",
        "Title": "Section Title",
        "Content": "Specific prompt or instruction for generating template for this section",
        "Subsections": [
          {
            "PageNumber": "1.1",
            "Title": "Subsection Title",
            "Content": "Specific prompt or instruction for generating template for this subsection"
          }
        ]
      }
    ]
  }
}
</output>"""

DOCUMENT_TEMPLATE_PROMPT_USER = """Generate a document template outline {num_pages}for the following query: <prompt>{query}</prompt>"""

DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM = """You are a document template generator. Replace the section/subsection prompts with the requested content; use placeholder text/examples/tables wherever required.
FORMAT YOUR OUTPUT AS A TEMPLATE ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

DOCUMENT_TEMPLATE_SECTION_PROMPT_USER = """<prompt>Output the content requested formatted as markdown. Follow the instructions below title/subtitle to replace it with appropriate content: {content_instruction}</prompt>"""

# File: llm_observability.py

import sqlite3
import json
from datetime import datetime
from typing import Dict, Any, List, Optional

class LLMObservabilityManager:
    def __init__(self, db_path: str = "llm_observability_v2.db"):
        self.db_path = db_path
        self.create_table()

    def create_table(self):
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS llm_observations (
                    id TEXT PRIMARY KEY,
                    conversation_id TEXT,
                    created_at DATETIME,
                    status TEXT,
                    request TEXT,
                    response TEXT,
                    model TEXT,
                    total_tokens INTEGER,
                    prompt_tokens INTEGER,
                    completion_tokens INTEGER,
                    latency FLOAT,
                    user TEXT
                )
            ''')

    def insert_observation(self, response: Dict[str, Any], conversation_id: str, status: str, request: str, latency: float, user: str):
        created_at = datetime.fromtimestamp(response['created'])

        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            cursor.execute('''
                INSERT INTO llm_observations
                (id, conversation_id, created_at, status, request, response, model, total_tokens, prompt_tokens, completion_tokens, latency, user)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                response['id'],
                conversation_id,
                created_at,
                status,
                request,
                json.dumps(response['choices'][0]['message']),
                response['model'],
                response['usage']['total_tokens'],
                response['usage']['prompt_tokens'],
                response['usage']['completion_tokens'],
                latency,
                user
            ))

    def get_observations(self, conversation_id: Optional[str] = None) -> List[Dict[str, Any]]:
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            if conversation_id:
                cursor.execute('SELECT * FROM llm_observations WHERE conversation_id = ? ORDER BY created_at', (conversation_id,))
            else:
                cursor.execute('SELECT * FROM llm_observations ORDER BY created_at')
            rows = cursor.fetchall()

            column_names = [description[0] for description in cursor.description]
            return [dict(zip(column_names, row)) for row in rows]

    def get_all_observations(self) -> List[Dict[str, Any]]:
        return self.get_observations()
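
# Usage sketch (illustrative, not part of the original file): each LLM call is stored as one
# SQLite row, and get_observations() returns rows as dicts keyed by column name.
#
#   manager = LLMObservabilityManager()                        # opens llm_observability_v2.db
#   history = manager.get_observations("my-conversation-id")   # chronological list of dicts
#   for obs in history:
#       print(obs["model"], obs["total_tokens"], obs["latency"])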

# File: app.py
import os
import json
import re
import asyncio
import time
from typing import List, Dict, Optional, Any, Callable, Union
from openai import AsyncOpenAI
import logging
import functools
from fastapi import APIRouter, HTTPException, Request, UploadFile, File, Depends
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from fastapi_cache import FastAPICache
from fastapi_cache.decorator import cache
import psycopg2
from datetime import datetime
import base64
from fastapi import Form

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def log_execution(func: Callable) -> Callable:
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        # Note: when func is an async function, func(...) returns a coroutine, so the success
        # log fires at call time and exceptions raised while awaiting are not logged here.
        logger.info(f"Executing {func.__name__}")
        try:
            result = func(*args, **kwargs)
            logger.info(f"{func.__name__} completed successfully")
            return result
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {e}")
            raise
    return wrapper

# aiclient.py

class AIClient:
    def __init__(self):
        self.client = AsyncOpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key="sk-or-v1-" + os.environ['OPENROUTER_API_KEY']
        )
        self.observability_manager = LLMObservabilityManager()

    @log_execution
    async def generate_response(
        self,
        messages: List[Dict[str, str]],
        model: str = "openai/gpt-4o-mini",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None

        start_time = time.time()
        response = await self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens
        )
        end_time = time.time()
        latency = end_time - start_time

        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )

        return response.choices[0].message.content

    @log_execution
    async def generate_vision_response(
        self,
        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
        model: str = "google/gemini-flash-1.5-8b",
        max_tokens: int = 32000,
        conversation_id: Optional[str] = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        if not messages:
            return None

        start_time = time.time()
        response = await self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens
        )
        end_time = time.time()
        latency = end_time - start_time

        # Log the observation
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )

        return response.choices[0].message.content

class VisionTools:
    def __init__(self, ai_client):
        self.ai_client = ai_client

    async def extract_images_info(self, images: List[UploadFile]) -> str:
        try:
            image_contents = []
            for image in images:
                image_content = await image.read()
                base64_image = base64.b64encode(image_content).decode('utf-8')
                image_contents.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:image/jpeg;base64,{base64_image}"
                    }
                })

            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Extract the contents of these images in detail in a structured format, focusing on any text, tables, diagrams, or visual elements that might be relevant for document generation."
                        },
                        *image_contents
                    ]
                }
            ]

            image_context = await self.ai_client.generate_vision_response(messages)
            return image_context
        except Exception as e:
            logger.error(f"Error processing images: {str(e)}")
            return ""

class DatabaseManager:
    """Manages database operations."""

    def __init__(self):
        self.db_params = {
            "dbname": "postgres",
            "user": os.environ['SUPABASE_USER'],
            "password": os.environ['SUPABASE_PASSWORD'],
            "host": "aws-0-us-west-1.pooler.supabase.com",
            "port": "5432"
        }

    @log_execution
    def update_database(self, user_id: str, user_query: str, response: str) -> None:
        with psycopg2.connect(**self.db_params) as conn:
            with conn.cursor() as cur:
                insert_query = """
                INSERT INTO ai_document_generator (user_id, user_query, response)
                VALUES (%s, %s, %s);
                """
                cur.execute(insert_query, (user_id, user_query, response))
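
# Illustrative only: the Postgres table written by update_database() is assumed to already
# exist in Supabase; a minimal schema compatible with the INSERT above might look like this.
#
#   CREATE TABLE IF NOT EXISTS ai_document_generator (
#       id         BIGSERIAL PRIMARY KEY,
#       user_id    TEXT,
#       user_query TEXT,
#       response   TEXT,
#       created_at TIMESTAMPTZ DEFAULT now()
#   );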

class DocumentGenerator:
    def __init__(self, ai_client: AIClient):
        self.ai_client = ai_client
        self.document_outline = None
        self.content_messages = []

    @staticmethod
    def extract_between_tags(text: str, tag: str) -> str:
        pattern = f"<{tag}>(.*?)</{tag}>"
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1).strip() if match else ""

    @staticmethod
    def remove_duplicate_title(content: str, title: str, section_number: str) -> str:
        patterns = [
            rf"^#+\s*{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^#+\s*{re.escape(title)}",
            rf"^{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^{re.escape(title)}",
        ]

        for pattern in patterns:
            content = re.sub(pattern, "", content, flags=re.MULTILINE | re.IGNORECASE)

        return content.lstrip()

    @log_execution
    async def generate_document_outline(self, query: str, num_pages: int, template: bool = False, image_context: str = "", max_retries: int = 3) -> Optional[Dict]:
        pages_prompt = "" if num_pages == 0 else f"consisting of {num_pages} pages "
        messages = [
            {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM},
            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query, num_pages=pages_prompt) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query, num_pages=pages_prompt)}
        ]
        # Update user content to include image context if provided
        if image_context:
            messages[1]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"

        for attempt in range(max_retries):
            outline_response = await self.ai_client.generate_response(messages, model="openai/gpt-4o")
            outline_json_text = self.extract_between_tags(outline_response, "output")

            try:
                self.document_outline = json.loads(outline_json_text)
                return self.document_outline
            except json.JSONDecodeError as e:
                if attempt < max_retries - 1:
                    logger.warning(f"Failed to parse JSON (attempt {attempt + 1}): {e}")
                    logger.info("Retrying...")
                else:
                    logger.error(f"Failed to parse JSON after {max_retries} attempts: {e}")
                    return None

    @log_execution
    async def generate_content(self, title: str, content_instruction: str, section_number: str, template: bool = False) -> str:
        SECTION_PROMPT_USER = DOCUMENT_SECTION_PROMPT_USER if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_USER
        self.content_messages.append({
            "role": "user",
            "content": SECTION_PROMPT_USER.format(
                #section_or_subsection_title=title,
                content_instruction=content_instruction
            )
        })
        section_response = await self.ai_client.generate_response(self.content_messages)
        content = self.extract_between_tags(section_response, "response")
        content = self.remove_duplicate_title(content, title, section_number)
        self.content_messages.append({
            "role": "assistant",
            "content": section_response
        })
        return content

class MarkdownConverter:
    @staticmethod
    def slugify(text: str) -> str:
        return re.sub(r'\W+', '-', text.lower())

    @classmethod
    def convert_to_markdown(cls, document: Dict) -> str:
        markdown = "<div style='text-align: center; padding-top: 33vh;'>\n\n"
        markdown += f"<h1 style='color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px; display: inline-block;'>{document['Title']}</h1>\n\n"
        markdown += f"<p style='color: #7f8c8d;'><em>By {document['Author']}</em></p>\n\n"
        markdown += f"<p style='color: #95a5a6;'>Version {document['Version']} | {document['Date']}</p>\n\n"
        markdown += "</div>\n\n"

        # Generate Table of Contents
        markdown += "<div style='page-break-before: always;'></div>\n\n"
        markdown += "<h2 style='color: #2c3e50; text-align: center;'>Table of Contents</h2>\n\n"
        markdown += "<nav style='background-color: #f8f9fa; padding: 20px; border-radius: 5px; line-height: 1.6;'>\n\n"

        for section in document['Pages']:
            section_number = section['PageNumber']
            section_title = section['Title']
            markdown += f"<p><a href='#{cls.slugify(section_title)}' style='color: #3498db; text-decoration: none;'>{section_number}. {section_title}</a></p>\n\n"

            for subsection in section.get('Subsections', []):
                subsection_number = subsection['PageNumber']
                subsection_title = subsection['Title']
                markdown += f"<p style='margin-left: 20px;'><a href='#{cls.slugify(subsection_title)}' style='color: #2980b9; text-decoration: none;'>{subsection_number} {subsection_title}</a></p>\n\n"

        markdown += "</nav>\n\n"

        # Generate Content
        markdown += "<div style='max-width: 800px; margin: 0 auto; font-family: \"Segoe UI\", Arial, sans-serif; line-height: 1.6;'>\n\n"

        for section in document['Pages']:
            markdown += "<div style='page-break-before: always;'></div>\n\n"
            section_number = section['PageNumber']
            section_title = section['Title']
            markdown += f"<h2 id='{cls.slugify(section_title)}' style='color: #2c3e50; border-bottom: 1px solid #bdc3c7; padding-bottom: 5px;'>{section_number}. {section_title}</h2>\n\n"
            markdown += f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{section['Content']}\n\n</div>\n\n"

        markdown += "</div>"
        return markdown

router = APIRouter()

class JsonDocumentResponse(BaseModel):
    json_document: Dict

# class JsonDocumentRequest(BaseModel):
#     query: str
#     template: bool = False
#     images: Optional[List[UploadFile]] = File(None)
#     documents: Optional[List[UploadFile]] = File(None)
#     conversation_id: str = ""

class MarkdownDocumentRequest(BaseModel):
    json_document: Dict
    query: str
    template: bool = False
    conversation_id: str = ""

MESSAGE_DELIMITER = b"\n---DELIMITER---\n"

def yield_message(message):
    message_json = json.dumps(message, ensure_ascii=False).encode('utf-8')
    return message_json + MESSAGE_DELIMITER
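
# Client-side sketch (illustrative assumption, not part of the original file): the markdown
# stream endpoint emits JSON messages separated by MESSAGE_DELIMITER, so a consumer can
# buffer the bytes and split on the delimiter. Assumes the httpx package and a running app.
#
#   import httpx, json
#
#   async def consume(url: str, payload: dict):
#       buffer = b""
#       async with httpx.AsyncClient(timeout=None) as client:
#           async with client.stream("POST", url, json=payload) as resp:
#               async for chunk in resp.aiter_bytes():
#                   buffer += chunk
#                   while b"\n---DELIMITER---\n" in buffer:
#                       raw, buffer = buffer.split(b"\n---DELIMITER---\n", 1)
#                       message = json.loads(raw)
#                       print(message["type"])  # document_section / complete_document / error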

async def generate_document_stream(document_generator: DocumentGenerator, document_outline: Dict, query: str, template: bool = False, conversation_id: str = ""):
    document_generator.document_outline = document_outline
    db_manager = DatabaseManager()
    overall_objective = query
    document_layout = json.dumps(document_generator.document_outline["Document"]["Pages"], indent=2)
    cache_key = f"image_context_{conversation_id}"
    image_context = await FastAPICache.get_backend().get(cache_key)

    SECTION_PROMPT_SYSTEM = DOCUMENT_SECTION_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM
    document_generator.content_messages = [
        {
            "role": "system",
            "content": SECTION_PROMPT_SYSTEM.format(
                overall_objective=overall_objective,
                document_layout=document_layout
            )
        }
    ]
    if image_context:
        document_generator.content_messages[0]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"

    for section in document_generator.document_outline["Document"].get("Pages", []):
        section_title = section.get("Title", "")
        section_number = section.get("PageNumber", "")
        content_instruction = section.get("Content", "")

        section_prompt_content = f"""# {section_number} {section_title}\n\n{content_instruction}\n\n"""

        for subsection in section.get("Subsections", []):
            subsection_title = subsection.get("Title", "")
            subsection_number = subsection.get("PageNumber", "")
            subsection_content_instruction = subsection.get("Content", "")
            section_prompt_content += f"""## {subsection_number} {subsection_title}\n\n{subsection_content_instruction}\n\n"""

        content = await document_generator.generate_content(section_title, section_prompt_content, section_number, template)
        section["Content"] = content
        yield yield_message({
            "type": "document_section",
            "content": {
                "section_number": section_number,
                "section_title": section_title,
                "content": content
            }
        })

    markdown_document = MarkdownConverter.convert_to_markdown(document_generator.document_outline["Document"])

    yield yield_message({
        "type": "complete_document",
        "content": {
            "markdown": markdown_document,
            "json": document_generator.document_outline
        }
    })

    db_manager.update_database("elevatics", query, markdown_document)

@router.post("/generate-document/markdown-stream")
async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRequest):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)

    async def stream_generator():
        try:
            async for chunk in generate_document_stream(document_generator, request.json_document, request.query, request.template, request.conversation_id):
                yield chunk
        except Exception as e:
            yield yield_message({
                "type": "error",
                "content": str(e)
            })

    return StreamingResponse(stream_generator(), media_type="application/octet-stream")

@router.post("/generate-document/json", response_model=JsonDocumentResponse)
@cache(expire=600*24*7)  # 100800 s (28 hours); placed below the route decorator so it wraps the registered endpoint
async def generate_document_outline_endpoint(
    query: str = Form(...),
    template: bool = Form(False),
    conversation_id: str = Form(...),
    num_pages: int = Form(...),
    images: Optional[List[UploadFile]] = File(None),
    documents: Optional[List[UploadFile]] = File(None)
):
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)
    vision_tools = VisionTools(ai_client)
    try:
        image_context = ""
        if images:
            image_context = await vision_tools.extract_images_info(images)

        # Store the image_context in the cache
        cache_key = f"image_context_{conversation_id}"
        await FastAPICache.get_backend().set(cache_key, image_context, expire=3600)  # Cache for 1 hour

        json_document = await document_generator.generate_document_outline(
            query,
            num_pages,
            template,
            image_context=image_context
        )

        if json_document is None:
            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")

        return JsonDocumentResponse(json_document=json_document)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

## OBSERVABILITY
from uuid import uuid4
import csv
from io import StringIO

class ObservationResponse(BaseModel):
    observations: List[Dict]

def create_csv_response(observations: List[Dict]) -> StreamingResponse:
    def iter_csv(data):
        output = StringIO()
        writer = csv.DictWriter(output, fieldnames=data[0].keys() if data else [])
        writer.writeheader()
        for row in data:
            writer.writerow(row)
        output.seek(0)
        yield output.read()

    headers = {
        'Content-Disposition': 'attachment; filename="observations.csv"'
    }
    return StreamingResponse(iter_csv(observations), media_type="text/csv", headers=headers)


@router.get("/last-observations/{limit}")
async def get_last_observations(limit: int = 10, format: str = "json"):
    observability_manager = LLMObservabilityManager()

    try:
        # Get all observations, sorted by created_at in descending order
        all_observations = observability_manager.get_observations()
        all_observations.sort(key=lambda x: x['created_at'], reverse=True)

        # Get the last conversation_id
        if all_observations:
            last_conversation_id = all_observations[0]['conversation_id']

            # Filter observations for the last conversation
            last_conversation_observations = [
                obs for obs in all_observations
                if obs['conversation_id'] == last_conversation_id
            ][:limit]

            if format.lower() == "csv":
                return create_csv_response(last_conversation_observations)
            else:
                return ObservationResponse(observations=last_conversation_observations)
        else:
            if format.lower() == "csv":
                return create_csv_response([])
            else:
                return ObservationResponse(observations=[])
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to retrieve observations: {str(e)}")

## TEST CACHE

class CacheItem(BaseModel):
    key: str
    value: str

@router.post("/set-cache")
async def set_cache(item: CacheItem):
    try:
        # Set the cache with a default expiration of 1 hour (3600 seconds)
        await FastAPICache.get_backend().set(item.key, item.value, expire=3600)
        return {"message": f"Cache set for key: {item.key}"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to set cache: {str(e)}")

@router.get("/get-cache/{key}")
async def get_cache(key: str):
    try:
        value = await FastAPICache.get_backend().get(key)
        if value is None:
            raise HTTPException(status_code=404, detail=f"No cache found for key: {key}")
        return {"key": key, "value": value}
    except HTTPException:
        # Re-raise the 404 above instead of converting it into a 500 below
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to get cache: {str(e)}")
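
# Wiring sketch (illustrative assumption, not part of the original file): the router and the
# FastAPICache backend are expected to be initialized by the application entry point, roughly:
#
#   from fastapi import FastAPI
#   from fastapi_cache import FastAPICache
#   from fastapi_cache.backends.inmemory import InMemoryBackend
#
#   app = FastAPI()
#   app.include_router(router)
#
#   @app.on_event("startup")
#   async def startup():
#       FastAPICache.init(InMemoryBackend(), prefix="document-generator")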