from flask import Flask, render_template, request, jsonify, send_from_directory
from dotenv import load_dotenv
from flask_cors import CORS
import os
import asyncio
from functools import wraps, lru_cache
import logging
import weaviate
from openai import AsyncOpenAI
from config import COLLECTION_NAME
import threading
import time
from flask_talisman import Talisman
from collections import deque

basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
# Talisman's built-in CSP is disabled; headers are set explicitly in
# add_csp_headers below.
Talisman(app, content_security_policy=None)
# Enable CORS at import time so it also applies when the app is served by a
# WSGI server rather than started via `python app.py`.
CORS(app, resources={r"/*": {"origins": "*"}})

load_dotenv()

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

openai_client = AsyncOpenAI(api_key=os.getenv('OPENAI_API_KEY'))

# Lazily created Weaviate client; see get_or_create_client().
client = None

connection_status = {"status": "Disconnected", "color": "red"}

# Keep only the ten most recent question/answer exchanges.
conversation_history = deque(maxlen=10)


@app.after_request
def add_csp_headers(response):
    # This explicit policy replaces the Talisman default disabled above.
    csp = (
        "default-src 'self' https: data: 'unsafe-inline' 'unsafe-eval'; "
        "script-src 'self' https: 'unsafe-inline' 'unsafe-eval'; "
        "style-src 'self' https: 'unsafe-inline'; "
        "img-src 'self' data: https:; "
        "connect-src 'self' https:; "
        "font-src 'self' https:; "
        "object-src 'none'; "
        "media-src 'self' https:; "
        "frame-src 'self' https:; "
        "worker-src 'self' blob:; "
        "form-action 'self'; "
        "base-uri 'self'; "
        "frame-ancestors 'self';"
    )
    response.headers['Content-Security-Policy'] = csp
    return response


@lru_cache(maxsize=1)
def get_weaviate_client():
    """Build the Weaviate client (v3 API) from the WCS_URL/WCS_API_KEY env vars."""
    return weaviate.Client(
        url=os.getenv('WCS_URL'),
        auth_client_secret=weaviate.auth.AuthApiKey(os.getenv('WCS_API_KEY')),
        additional_headers={
            "X-OpenAI-Api-Key": os.getenv('OPENAI_API_KEY')
        },
        timeout_config=(5, 60)  # (connect, read) timeouts in seconds
    )


def get_or_create_client():
    global client
    if client is None:
        client = get_weaviate_client()
    return client


def initialize_weaviate_client(max_retries=3, retry_delay=5):
    """Connect to Weaviate with retries, updating the shared connection_status."""
    global connection_status
    retries = 0
    while retries < max_retries:
        connection_status = {"status": "Connecting...", "color": "orange"}
        try:
            logger.info(f"Attempting to connect to Weaviate (Attempt {retries + 1}/{max_retries})")
            client = get_or_create_client()
            # A schema fetch doubles as a connectivity check.
            client.schema.get()
            connection_status = {"status": "Connected", "color": "green"}
            logger.info("Successfully connected to Weaviate")
            return connection_status
        except Exception as e:
            logger.error(f"Error connecting to Weaviate: {str(e)}")
            connection_status = {"status": f"Error: {str(e)}", "color": "red"}
            retries += 1
            if retries < max_retries:
                logger.info(f"Retrying in {retry_delay} seconds...")
                time.sleep(retry_delay)
    logger.error("Max retries reached. Could not connect to Weaviate.")
    return connection_status


# Kick off the first connection attempt in the background so startup is not
# blocked while Weaviate is reached.
initialization_thread = threading.Thread(target=initialize_weaviate_client)
initialization_thread.start()


def async_lru_cache(maxsize=1024):
    cache = {}

    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            key = str(args) + str(kwargs)
            if key not in cache:
                if len(cache) >= maxsize:
                    cache.pop(next(iter(cache)))
                cache[key] = await func(*args, **kwargs)
            return cache[key]
        return wrapper
    return decorator
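# Note: the cache above keys on str(args) + str(kwargs), so it assumes
# arguments have stable string representations, and eviction is oldest-first
# (FIFO) rather than true LRU. Entries never expire otherwise.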


@async_lru_cache(maxsize=1000)
async def get_embedding(text):
    response = await openai_client.embeddings.create(
        input=text,
        model="text-embedding-3-large"
    )
    return response.data[0].embedding
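# text-embedding-3-large vectors are 3072-dimensional by default, so the
# search below should log embeddings of length 3072.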


async def search_multimodal(query: str, limit: int = 30, alpha: float = 0.6):
    logger.info(f"Starting multimodal search for query: {query}")
    try:
        query_vector = await get_embedding(query)
        logger.info(f"Generated query embedding of length {len(query_vector)}")

        weaviate_client = get_or_create_client()
        query_builder = (
            weaviate_client.query
            .get(COLLECTION_NAME, ["content_type", "source_document", "page_number",
                                   "paragraph_number", "text", "image_path", "description",
                                   "table_content"])
            .with_hybrid(query=query, vector=query_vector, alpha=alpha)
            .with_limit(limit)
        )
        # Pass the bound .do method so asyncio.to_thread runs the blocking
        # request off the event loop.
        response = await asyncio.to_thread(query_builder.do)

        results = response['data']['Get'][COLLECTION_NAME]
        logger.info(f"Search completed. Found {len(results)} results.")
        return results
    except Exception as e:
        logger.error(f"Error in search_multimodal: {str(e)}", exc_info=True)
        return []
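# Each hit is a dict keyed by the requested properties, roughly of the form
# (illustrative values only):
#   {"content_type": "text", "source_document": "report.pdf", "page_number": 15,
#    "paragraph_number": 3, "text": "...", "image_path": None,
#    "description": None, "table_content": None}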


# Streaming variant of generate_response (not currently used by the routes
# below); kept for a future streaming endpoint.
async def generate_response_stream(query: str, context: str):
    prompt = f"""
    You are an AI assistant with extensive expertise in the semiconductor industry. Your knowledge spans a wide range of companies, technologies, and products, including but not limited to: System-on-Chip (SoC) designs, Field-Programmable Gate Arrays (FPGAs), Microcontrollers, Integrated Circuits (ICs), semiconductor manufacturing processes, and emerging technologies like quantum computing and neuromorphic chips.

    Use the following context, your vast knowledge, and the user's question to generate an accurate, comprehensive, and insightful answer. While formulating your response, follow these steps internally:

    1. Analyze the question to identify the main topic and specific information requested.
    2. Evaluate the provided context and identify relevant information.
    3. Retrieve additional relevant knowledge from your semiconductor industry expertise.
    4. Reason and formulate a response by combining context and knowledge.
    5. Generate a detailed response that covers all aspects of the query.
    6. Review and refine your answer for coherence and accuracy.

    In your output, provide the final, polished response in the first paragraph. Do not include your step-by-step reasoning or mention the process you followed.

    IMPORTANT: Ensure your response is grounded in factual information. Do not hallucinate or invent information. If you're unsure about any aspect of the answer or if the necessary information is not available in the provided context or your knowledge base, clearly state this uncertainty.

    After your response, on a new line, write "Top 5 most relevant sources used to generate the response:" followed by the top 5 most relevant sources. Rank them based on their relevance and importance to the answer. Format each source as follows:
    [Rank]. [Content Type] from [Document Name] (Page [Page Number], [Additional Info])

    For example:
    Top 5 most relevant sources used to generate the response:
    1. Text from Semiconductor Industry Report 2023 (Page 15, Paragraph 3)
    2. Table from FPGA Market Analysis (Page 7, Table 2.1)
    3. Image Description from SoC Architecture Diagram (Page 22, Path: ./data/images/soc_diagram.jpg)

    Context: {context}

    User Question: {query}

    Based on the above context and your extensive knowledge of the semiconductor industry, provide your detailed, accurate, and grounded response below, followed by the top 5 ranked sources:
    """

    async for chunk in await openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are an expert semiconductor industry analyst"},
            {"role": "user", "content": prompt}
        ],
        temperature=0,
        max_tokens=500,
        stream=True
    ):
        content = chunk.choices[0].delta.content
        if content is not None:
            yield content


def process_search_result(item):
    if item['content_type'] == 'text':
        return f"Text from {item['source_document']} (Page {item['page_number']}, Paragraph {item['paragraph_number']}): {item['text']}\n\n"
    elif item['content_type'] == 'image':
        return f"Image Description from {item['source_document']} (Page {item['page_number']}, Path: {item['image_path']}): {item['description']}\n\n"
    elif item['content_type'] == 'table':
        return f"Table Description from {item['source_document']} (Page {item['page_number']}): {item['description']}\n\n"
    return ""


async def generate_follow_up_questions(answer):
    prompt = f"""
    Based on the following response, generate exactly 2 follow-up questions:\n\n{answer}\n\nFollow-up questions:
    """

    response = await openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are a helpful assistant generating follow-up questions."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=100,
        n=1,
        temperature=0.2
    )

    follow_up_questions = response.choices[0].message.content.strip().split("\n")
    return [q.strip() for q in follow_up_questions[:2] if q.strip()]
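# The line-split parsing above assumes the model emits one question per line
# (e.g. "1. ..." / "2. ..."); any numbering is kept as part of the question.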


async def esg_analysis_stream(user_query: str, previous_context: str = None):
    try:
        logger.info(f"Processing query: {user_query}")

        if previous_context:
            context = previous_context
            logger.info("Using previous context for follow-up question")
        else:
            search_results = await search_multimodal(user_query)
            logger.info(f"Found {len(search_results)} search results")

            if not search_results:
                # Match the four-element shape the caller unpacks.
                return "I'm sorry, but I couldn't find any relevant information to answer your query.", "", [], ""

            # process_search_result is lightweight string formatting, so a plain
            # comprehension is sufficient here.
            context_parts = [process_search_result(item) for item in search_results]
            context = "".join(context_parts)
            logger.info(f"Processed search results into context of length {len(context)}")

        main_response, sources = await generate_and_split_response(user_query, context)
        # Follow-up questions are derived from the generated answer, matching
        # the prompt in generate_follow_up_questions.
        follow_up_questions = await generate_follow_up_questions(main_response)

        return main_response, sources, follow_up_questions, context

    except Exception as e:
        logger.error(f"Error in esg_analysis_stream: {str(e)}", exc_info=True)
        return "I apologize, but an error occurred while processing your request.", "", [], ""


async def generate_and_split_response(query: str, context: str):
    full_response = await generate_response(query, context)
    parts = full_response.split("Top 5 most relevant sources used to generate the response:", 1)
    main_response = parts[0].strip()
    sources = parts[1].strip() if len(parts) > 1 else ""
    return main_response, sources
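# Illustrative split: a model output of
#   "FPGAs combine ...\nTop 5 most relevant sources used to generate the response:\n1. Text from ..."
# yields main_response="FPGAs combine ..." and sources="1. Text from ...".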


async def generate_response(query: str, context: str):
    prompt = f"""
    You are an AI assistant with extensive expertise in the semiconductor industry. Your knowledge spans a wide range of companies, technologies, and products, including but not limited to: System-on-Chip (SoC) designs, Field-Programmable Gate Arrays (FPGAs), Microcontrollers, Integrated Circuits (ICs), semiconductor manufacturing processes, and emerging technologies like quantum computing and neuromorphic chips.

    Use the following context, your vast knowledge, and the user's question to generate an accurate, comprehensive, and insightful answer. While formulating your response, follow these steps internally:

    1. Analyze the question to identify the main topic and specific information requested.
    2. Evaluate the provided context and identify relevant information.
    3. Retrieve additional relevant knowledge from your semiconductor industry expertise.
    4. Reason and formulate a response by combining context and knowledge.
    5. Generate a detailed response that covers all aspects of the query.
    6. Review and refine your answer for coherence and accuracy.

    For general conversational queries such as "Hi", "Hello", or "How are you", reply naturally, as a person would in everyday conversation, and do not include company details or other report information.

    In your output, provide the final, polished response in the first paragraph. Do not include your step-by-step reasoning or mention the process you followed.

    IMPORTANT NOTE: Ensure your response is grounded in factual information. Do not hallucinate or invent information. If you're unsure about any aspect of the answer or if the necessary information is not available in the provided context or your knowledge base, clearly state this uncertainty.

    After your response, on a new line, write "Top 5 most relevant sources used to generate the response:" followed by the top 5 most relevant sources. Rank them based on their relevance and importance to the answer. Format each source as follows:
    [Rank]. [Content Type] from [Document Name] (Page [Page Number], [Additional Info])

    For example:
    Top 5 most relevant sources used to generate the response:
    1. Text from Semiconductor Industry Report 2023 (Page 15, Paragraph 3)
    2. Table from FPGA Market Analysis (Page 7, Table 2.1)
    3. Image Description from SoC Architecture Diagram (Page 22, Path: ./data/images/soc_diagram.jpg)

    IMPORTANT NOTE: Only list a source if it is actually referenced or used in the response.

    Context: {context}

    User Question: {query}

    Based on the above context and your extensive knowledge of the semiconductor industry, provide your detailed, accurate, and grounded response below, followed by the top 5 ranked sources:
    """

    response = await openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are an expert semiconductor industry analyst"},
            {"role": "user", "content": prompt}
        ],
        temperature=0,
        max_tokens=500
    )
    return response.choices[0].message.content


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/ask', methods=['POST'])
async def ask():
    # Retry the Weaviate connection on demand if the background attempt failed.
    if connection_status["status"] != "Connected":
        initialize_weaviate_client()

    if connection_status["status"] != "Connected":
        return jsonify({'error': 'Weaviate client is not connected'}), 503

    try:
        user_question = request.json['question']

        # Reuse the previous context when the user appears to refer back to an
        # earlier exchange.
        if conversation_history and any(keyword in user_question.lower() for keyword in ["previous", "before", "last"]):
            previous_context = conversation_history[-1]['context']
            main_response, sources, follow_up_questions, context = await esg_analysis_stream(user_question, previous_context)
        else:
            main_response, sources, follow_up_questions, context = await esg_analysis_stream(user_question)

        conversation_history.append({
            'question': user_question,
            'response': main_response,
            'sources': sources,
            'context': context
        })

        response_data = {
            'response': main_response,
            'sources': sources,
            'follow_up_questions': follow_up_questions[:2]
        }
        return jsonify(response_data)
    except Exception as e:
        logger.error(f"Error processing request: {str(e)}", exc_info=True)
        return jsonify({'error': 'An error occurred while processing your request'}), 500
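# Example request against a local instance (assuming the default port below):
#   curl -X POST http://localhost:7860/ask \
#        -H "Content-Type: application/json" \
#        -d '{"question": "What is an FPGA?"}'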


@app.route('/data/<path:filename>')
def serve_data_file(filename):
    try:
        # Some stored paths begin with './data/'; strip the prefix so lookups
        # are relative to the data directory.
        if filename.startswith('./data/'):
            filename = filename[7:]
        return send_from_directory(os.path.join(basedir, 'data'), filename, mimetype='application/pdf')
    except FileNotFoundError:
        return f"Error: File {filename} not found", 404


@app.route('/status')
def status():
    return jsonify(connection_status)


@app.route('/test-pdf')
def test_pdf():
    return '''
    <h1>PDF Test</h1>
    <object data="./data/DS950 - Versal Architecture and Product Data Sheet - Overview - v2.2 - 240604.pdf" type="application/pdf" width="100%" height="500px">
        <p>It appears you don't have a PDF plugin for this browser.
        No biggie... you can <a href="./data/DS950 - Versal Architecture and Product Data Sheet - Overview - v2.2 - 240604.pdf">click here to download the PDF file.</a></p>
    </object>
    '''


@app.route('/check_connection', methods=['GET'])
def check_connection():
    if connection_status["status"] != "Connected":
        initialize_weaviate_client()
    return jsonify(connection_status)


@app.route('/history', methods=['GET'])
def get_history():
    history_data = list(conversation_history)
    return jsonify(history_data)
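# /history returns the deque as a JSON list of {question, response, sources,
# context} objects, oldest first, capped at the ten most recent exchanges.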


if __name__ == '__main__':
    # Development entry point only; disable debug=True and use a WSGI server
    # in production.
    app.run(host="0.0.0.0", port=7860, debug=True, threaded=True)