Update app.py
app.py CHANGED
@@ -11,41 +11,45 @@ from langchain_pinecone import PineconeVectorStore
 st.set_page_config('Opositor')
 st.header("Pregunta al trebep")
 
-
-
+modelos_llm = [
+    'llama3-70b-8192',
+    'llama3-8b-8192',
+    'mixtral-8x7b-32768',
+    'gemma-7b-it'
+]
+modelo_llm = st.selectbox('Modelo de lenguaje', list(modelos_llm))
+
+@st.cache_resource
+def setup(modelo_llm):
+    # Langsmith
+    os.environ["LANGCHAIN_TRACING_V2"] = "true"
+    os.environ["LANGCHAIN_API_KEY"] = "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"
+    os.environ["LANGCHAIN_PROJECT"] = "trebep"
+
 # LOAD THE EMBEDDING MODEL
 model_name = 'intfloat/multilingual-e5-base'
 embedding = HuggingFaceEmbeddings(model_name=model_name)
+
+    # LOAD THE LLM
+    os.environ["GROQ_API_KEY"] = "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"
+    llm = ChatGroq(model = modelo_llm)
 
-
-
-
-
-
-
-
-
-
-
-
-
-#
-
-
-
-os.environ["PINECONE_API_KEY"] ='4bf0b4cf-4ced-4f70-8977-d60bb8ae405a'
-index_name = "boe-intfloat-multilingual-e5-base"
-namespace = "trebep"
-vectorstore = PineconeVectorStore(index_name=index_name, namespace=namespace, embedding=embedding)
-
-# CREATE THE RETRIEVAL CHAIN
-qa = RetrievalQA.from_chain_type(
-    llm=llm,
-    chain_type="stuff",
-    retriever=vectorstore.as_retriever(),
-    return_source_documents=True,
-    #verbose=True
-)
+    # LOAD THE PINECONE VECTORSTORE
+    os.environ["PINECONE_API_KEY"] ='4bf0b4cf-4ced-4f70-8977-d60bb8ae405a'
+    index_name = "boe-intfloat-multilingual-e5-base"
+    namespace = "trebep"
+    vectorstore = PineconeVectorStore(index_name=index_name, namespace=namespace, embedding=embedding)
+
+    # CREATE THE RETRIEVAL CHAIN
+    qa = RetrievalQA.from_chain_type(
+        llm=llm,
+        chain_type="stuff",
+        retriever=vectorstore.as_retriever(),
+        return_source_documents=True,
+        #verbose=True
+    )
+
+    return qa
 
 # Function to display logs
 def mostrar_logs(logs,hints):
@@ -68,6 +72,7 @@ user_question = st.text_input("¡A jugar! Haz una pregunta al trebep:")
 if user_question:
 
     # Initialize environment
+    qa = setup(modelo_llm)
 
     # Get the answer
     respuesta = qa.invoke(user_question)
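A note on the caching pattern introduced in the first hunk: Streamlit re-executes app.py from top to bottom on every widget interaction, so without @st.cache_resource the embedding model, Pinecone connection, and chain would all be rebuilt on every question. The decorator memoizes setup()'s return value per distinct argument, so resources are only rebuilt when the selectbox value actually changes. A toy sketch of that behavior (setup_sketch and its body are illustrative stand-ins, not part of the commit):

import streamlit as st

@st.cache_resource
def setup_sketch(modelo_llm: str) -> str:
    # Stand-in for the real setup(): this body runs once per distinct
    # modelo_llm value; later reruns return the cached object.
    print(f"building resources for {modelo_llm}")
    return f"chain-for-{modelo_llm}"

qa = setup_sketch('llama3-70b-8192')   # first run: body executes
qa = setup_sketch('llama3-70b-8192')   # rerun: served from cache
qa = setup_sketch('gemma-7b-it')       # new argument: body executes again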
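The commit also checks live-looking LangSmith, Groq, and Pinecone keys into source, where anyone with read access can use them; they are worth rotating and moving out of app.py. A minimal hardening sketch using Streamlit's built-in secrets store, assuming the three keys have been added to .streamlit/secrets.toml under the names shown (the names are illustrative, not from the commit):

import os
import streamlit as st

# Illustrative: pull credentials from .streamlit/secrets.toml instead of
# hardcoding them in source; the secret names here are an assumption.
for key in ("LANGCHAIN_API_KEY", "GROQ_API_KEY", "PINECONE_API_KEY"):
    os.environ[key] = st.secrets[key]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "trebep"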
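Finally, on the shape of what qa.invoke returns in the second hunk: because the chain is built with return_source_documents=True, the result is a dict with "query", "result", and "source_documents" keys, the last being the list of retrieved LangChain Document objects. A sketch of how the answer and its sources could be rendered, assuming the qa and user_question variables from the diff:

respuesta = qa.invoke(user_question)
st.write(respuesta["result"])              # the model's answer
for doc in respuesta["source_documents"]:  # the retrieved BOE chunks
    st.caption(str(doc.metadata))          # provenance metadata
    st.text(doc.page_content[:300])        # excerpt of the chunk text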