# Hugging Face Space app (last recorded build status: Runtime error)
import asyncio
# Ensure a usable event loop exists in the main thread (Spaces workers may
# start without one, which breaks llama-index/gradio async internals).
asyncio.set_event_loop(asyncio.new_event_loop())
from llama_index.core import (
    VectorStoreIndex,
    ServiceContext,
    SimpleDirectoryReader,
    load_index_from_storage,
)
from llama_index.core.storage import StorageContext
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.prompts import PromptTemplate
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.query_pipeline import InputComponent
from llama_index.core.indices.knowledge_graph import KGTableRetriever
from llama_index.legacy.vector_stores.faiss import FaissVectorStore
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
import openai
import os
from github import Github
from datetime import datetime
import gradio as gr
# OpenAI credentials come from the Space's secrets.
openai.api_key = os.environ.get('openai_key')
os.environ["OPENAI_API_KEY"] = os.environ.get('openai_key')
# Github: logging helpers injected as Python source from a Space secret.
# NOTE(review): exec() of env-provided code runs arbitrary Python; acceptable
# only because these secrets are controlled by the Space owner.
exec(os.environ.get('logs_context'))
# Context: presumably defines `chat_engine` and `memory` used by respond()
# below — cannot verify from this file; confirm against the secret's content.
exec(os.environ.get('context'))
project_name = "DEV PharmaWise Data Integrity Chat 4.5"
import networkx as nx
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
def draw_graph():
    """Render the current knowledge-graph triples as a PNG and return a PIL Image.

    Reads the module-level ``kg_data`` list of ``(source, relation, target)``
    triples. ``kg_data`` is only created by ``refresh``/the chat flow, so an
    absent or empty value now yields an empty graph instead of raising
    NameError when the "Grafo" button is clicked before any chat happened.
    """
    # Fix: original `global kg_data` read crashed with NameError when the
    # variable had never been assigned; fall back to an empty triple list.
    triples = globals().get('kg_data') or []
    graph = nx.DiGraph()
    for source, relation, target in triples:
        graph.add_edge(source, target, label=relation)
    # spring_layout spreads the nodes apart for readability
    pos = nx.spring_layout(graph)
    plt.figure(figsize=(12, 8))
    nx.draw(graph, pos, with_labels=True, node_color='skyblue', node_size=400,
            edge_color='k', linewidths=1, font_size=8, font_weight='bold')
    # Only edges that actually carry a 'label' attribute get an edge label.
    edge_labels = {
        (source, target): data['label']
        for source, target, data in graph.edges(data=True)
        if 'label' in data
    }
    nx.draw_networkx_edge_labels(graph, pos, edge_labels=edge_labels, font_size=7, font_weight='normal')
    plt.title("Graph")
    plt.axis('off')
    # Serialize the figure to an in-memory PNG and hand it to Gradio as PIL.
    buf = BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plt.close()
    return Image.open(buf)
def extraer_informacion_metadata(response, max_results=10):
    """Extract human-readable source references from a llama-index response.

    Parameters:
        response: object exposing ``source_nodes`` (llama-index query/chat
            response); each node carries ``node.metadata``, ``node.id_`` and
            a relevance ``score``.
        max_results: maximum number of reference lines to return.

    Returns:
        list[str]: one Spanish "Página … del archivo … (Relevance: …)" line
        per source node that has a non-empty ``file_name`` in its metadata,
        truncated to ``max_results`` entries.
    """
    source_nodes = response.source_nodes
    page_file_info = []
    for node in source_nodes:
        metadata = node.node.metadata
        # Skip nodes with a missing or empty file_name (same filter as before).
        if not metadata.get('file_name'):
            continue
        # Fix: some retrievers return score=None, which crashed the `:.2f`
        # format; report 0.00 instead of raising TypeError.
        score = node.score if node.score is not None else 0.0
        page_file_info.append(
            f"Página {metadata.get('page_label', '')} del archivo {metadata['file_name']} "
            f"(Relevance: {score:.2f} - Id: {node.node.id_})"
        )
        # Stop early once the result cap is reached.
        if len(page_file_info) >= max_results:
            break
    return page_file_info
from typing import List
from llama_index.core import Prompt
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.core.chat_engine.context import ContextChatEngine
from llama_index.core.memory import ChatMemoryBuffer
# Module-level conversation state shared by the Gradio callbacks below.
chat_history_engine = []  # engine-side history (list of ChatMessage)
result_metadata = ""      # markdown bullet list of source references for the UI
# Build the Gradio UI; the event handlers below are defined inside this
# Blocks context so they can reference the components by closure.
with gr.Blocks(theme='sudeepshouche/minimalist') as demo:
    def visible():
        # Would reveal the "Grafo" button; currently unused — its only wiring
        # (`response.change(visible, ...)`) is commented out at the bottom.
        return {btn_graph: gr.Button(value="Grafo", visible=True)}
    def get_ref():
        # Push the latest source references (built in respond()) into the
        # "Referencias" markdown pane.
        return {mkdn: gr.Markdown(result_metadata)}
def refresh(chat_history): | |
global kg_data | |
global chat_history_engine | |
global result_metadata | |
kg_data = [] | |
chat_history_engine = [] | |
result_metadata = "" | |
chat_history = [[None, None]] | |
gr.Info("¡Listo! Ya puedes seguir chateando.") | |
return chat_history | |
def summarize_assistant_messages(chat_history: List[ChatMessage]) -> List[ChatMessage]: | |
# Encontrar la anteúltima respuesta del asistente | |
assistant_messages = [msg for msg in chat_history if msg.role == MessageRole.ASSISTANT] | |
if len(assistant_messages) < 2: | |
return chat_history # No hay suficientes mensajes del asistente para resumir | |
anteultima_respuesta = assistant_messages[-2] | |
# Usar GPT-3.5 para generar un resumen de la anteúltima respuesta del asistente | |
prompt = Prompt(f"Responder SOLO con un resumen del siguiente texto: \n\n{anteultima_respuesta.content}") | |
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1) | |
response = llm.predict(prompt) | |
# Crear un nuevo ChatMessage con el resumen como contenido y el rol de asistente | |
summarized_message = ChatMessage(content=response, role=MessageRole.ASSISTANT) | |
# Reconstruir el historial de chat reemplazando la anteúltima respuesta del asistente con el resumen | |
new_chat_history = [msg if msg != anteultima_respuesta else summarized_message for msg in chat_history] | |
return new_chat_history | |
def respond(message, chat_history): | |
global chat_history_engine | |
global result_metadata | |
# Verificar si el mensaje está vacío o no contiene nada | |
if not message.strip(): | |
gr.Info("Escribe un mensaje en el chat") | |
if chat_history: | |
return "", chat_history | |
return | |
# Si chat_history está vacío, inicialízalo con el mensaje del usuario actual | |
if not chat_history: | |
chat_history = [[message, ""]] | |
else: | |
# Si chat_history no está vacío, agrega el mensaje actual al final de la lista | |
chat_history.append([message, ""]) | |
chat_history_engine = summarize_assistant_messages(chat_history_engine) | |
#chat_history_engine.append(ChatMessage(role=MessageRole.USER, content=message)) | |
response = chat_engine.stream_chat(message, chat_history=chat_history_engine) | |
# Extraer información de source_nodes | |
metadata_info = extraer_informacion_metadata(response, max_results=10) | |
# Presentar la información de source_nodes en forma de lista con bullets | |
if metadata_info: | |
metadata_list = "\n".join(["- " + info for info in metadata_info]) | |
result_metadata = "\n\n" + metadata_list | |
for text in response.response_gen: | |
chat_history[-1][1] += text # Agrega el texto de respuesta al último mensaje en chat_history | |
yield "", chat_history | |
print("----------") | |
print(memory.get_all()) | |
    # --- UI layout ---------------------------------------------------------
    gr.Markdown("""
    # PharmaWise Data Integrity Chat 4.5
    Realiza preguntas a tus datos y obtén al final del texto las paginas y documentos utilizados generar tu responder.
    """)
    with gr.Row():
        with gr.Column():
            # Left column: chat display, question box and action buttons.
            chatbot = gr.Chatbot(show_label=False, show_copy_button=True, ) #layout="panel"
            pregunta = gr.Textbox(show_label=False, autofocus=True, placeholder="Realiza tu consulta...")
            pregunta.submit(respond, [pregunta, chatbot], [pregunta, chatbot])
            with gr.Row():
                btn_send = gr.Button(value="Preguntar", variant="primary")
                clear = gr.Button(value="Limpiar")
            gr.Examples(label="Ejemplos", examples=["Explicar el concepto ALCOA"], inputs=[pregunta])
        with gr.Column():
            # Right column: static list of the indexed knowledge-base PDFs,
            # the references pane and the knowledge-graph image.
            with gr.Accordion(label="Bases de datos del conocimiento", open=False):
                gr.Markdown("""
                ###### [1] ISPE Risk Based Approach to Compliant Electronic Records and Signatures.pdf
                ###### [2] EMA Guideline on computerised systems and electronic data in clinical trials.pdf
                ###### [3] EU GMP guide annexes Supplementary requirements Annex 11 Computerised systems.pdf
                ###### [4] FDA Data Integrity and Compliance With Drug CGMP (Q&A) Guidance for Industry.pdf
                ###### [5] GAMP 5 A Risk Based Approach to Compliant GxP Computerized System (ED2).pdf
                ###### [6] ISPE Application of GAMP 5 to Implementation of a GxP Clinical System.pdf
                ###### [7] ISPE Guide_ Project Management for the Pharmaceutical Industry - ISPE.pdf
                ###### [8] ISPE Science and Risk-Based Approach for the Delivery of Facilities, Systems, and Equipment.pdf
                ###### [9] GAMP Good Practice Guide The Validation of Legacy Systems.pdf
                ###### [10] ISPE Manufacturing Execution Systems.pdf
                ###### [11] MHRA GXP Data Integrity Guidance and Definitions (2018).pdf
                ###### [12] PI 041-1 Good Practices for Data Management and Integrity in Regulated Environments (2021).pdf
                ###### [13] ISPE Records and Data Integrity Guide.pdf
                ###### [14] ISPE Testing GxP Systems.pdf
                ###### [15] WHO TR 1033 Annex 4 Guideline on data integrity.pdf
                ###### [16] ISPE Validation of Laboratory Computerized Systems 2005.pdf
                ###### [17] FDA Guidance for Industry Part 11, Electronic Records; Electronic Signatures.pdf
                ###### [18] FDA General Principles of Software Validation -2002.pdf
                ###### [19] FDA Guidance for Industry Computerized Systems Used in Clinical Trials.pdf
                ###### [20] FDA Guidance-Computer-Software-Assurance - 2022.pdf
                ###### [21] FDA 21 CFR Part 11 Electronic Records Electronic-Signatures - 2003.pdf
                ###### [22] EMA Annex 15 Qualification and Validation.pdf
                ###### [23] EMA Annex 11 Computerised Systems.pdf
                ###### [24] ANMAT Disposicion_3827-2018 - Anexo 6 Sistemas Informaticos.pdf
                ###### [25] DIGEMID DS-021 5.6 Sistemas Computadorizados.pdf
                ###### [26] COFEPRIS BPM-NOM059-2015 9.13 Validación de sistemas computacionales.pdf
                ###### [27] PA-PH-OMCL (08) 88 R5 Annex 2 Validation of Complex Computerised Systems.pdf
                ###### [28] INVIMA Resolución_3619 - GLP- 2013 5 Equipos procesadores de datos.pdf
                ###### [29] PAPHOMCL (08) 87 R6 Annex 1 Validation of Excel Spreadsheets.pdf
                ###### [30] PA-PH-OMCL (08) 69 R7 Validation of Computerised Systems.pdf
                ###### [31] GUÍA BPM ARCSA 2020 (software).pdf
                ###### [32] GMP Paraguay DINAVISA Resolucion 197-21.pdf
                ###### [33] ANVISA Guide for Computer Systems Validation 33-2020.pdf
                ###### [34] ANVISA INSTRUÇÃO NORMATIVA - IN Nº 43 - 2019.pdf
                ###### [35] PIC_011_3_recommendation_on_computerised_systems.pdf
                ###### [36] PIC Revision of Annex 11 EU GMP.pdf
                """)
            with gr.Accordion(label="Referencias", open=True):
                mkdn = gr.Markdown()
            with gr.Row():
                btn_graph = gr.Button(value="Grafo")
                btn_ref = gr.Button(value="Referencias")
            with gr.Row():
                grafo = gr.Image(label="Grafo", show_share_button=False)
    # --- Event wiring ------------------------------------------------------
    btn_ref.click(get_ref, outputs=[mkdn])
    btn_send.click(respond, [pregunta, chatbot], [pregunta, chatbot])
    btn_graph.click(draw_graph, outputs=[grafo])
    clear.click(refresh, inputs=[chatbot], outputs=[chatbot])
    #response.change(visible, [], [btn_graph])

# Enable queuing so the streaming generator handlers work under load.
demo.queue(default_concurrency_limit=20)
demo.launch()