Update app.py
Browse files
app.py
CHANGED
@@ -13,22 +13,25 @@ st.header("Pregunta al trebep")
|
|
13 |
|
14 |
|
15 |
@st.cache_resource
|
16 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
# CARGAMOS LLM
|
18 |
os.environ["GROQ_API_KEY"] = "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"
|
19 |
model = 'llama3-8b-8192'
|
20 |
llm = ChatGroq(model = model)
|
21 |
|
22 |
-
# CARGAMOS MODELO DE EMBEDDING
|
23 |
-
model_name = 'intfloat/multilingual-e5-base'
|
24 |
-
embedding = HuggingFaceEmbeddings(model_name=model_name)
|
25 |
-
|
26 |
# CARGAMOS EL VECTORSTORE DE PINECONE
|
27 |
os.environ["PINECONE_API_KEY"] ='4bf0b4cf-4ced-4f70-8977-d60bb8ae405a'
|
28 |
index_name = "boe-intfloat-multilingual-e5-base"
|
29 |
namespace = "trebep"
|
30 |
vectorstore = PineconeVectorStore(index_name=index_name, namespace=namespace, embedding=embedding)
|
31 |
-
|
|
|
32 |
# CREAMOS EL RETRIEVAL
|
33 |
qa = RetrievalQA.from_chain_type(
|
34 |
llm=llm,
|
@@ -61,7 +64,9 @@ user_question = st.text_input("¡A jugar! Haz una pregunta al trebep:")
|
|
61 |
if user_question:
|
62 |
|
63 |
# Inicializar entorno
|
64 |
-
|
|
|
|
|
65 |
|
66 |
# Obtenemos la respuesta
|
67 |
respuesta = qa.invoke(user_question)
|
|
|
13 |
|
14 |
|
15 |
@st.cache_resource
def read():
    """Load the sentence-embedding model, cached across Streamlit reruns.

    Returns:
        HuggingFaceEmbeddings: the multilingual-e5-base embedding model.
    """
    # CARGAMOS MODELO DE EMBEDDING — load the embedding model once;
    # @st.cache_resource memoizes the RETURN value across reruns.
    model_name = 'intfloat/multilingual-e5-base'
    embedding = HuggingFaceEmbeddings(model_name=model_name)
    # BUG FIX: the original returned nothing, so st.cache_resource cached
    # None and the embedding object was unreachable by callers.
    return embedding
|
20 |
+
|
21 |
+
|
22 |
+
def setup(embedding=None):
    """Create the chat LLM and connect to the Pinecone vector store.

    Args:
        embedding: embedding model to use for the vector store. Defaults to
            None, in which case the cached ``read()`` loader is used —
            backward-compatible with existing ``setup()`` call sites.

    Returns:
        tuple: ``(llm, vectorstore)`` for building the retrieval chain.
            (The original discarded both, leaving callers with nothing.)
    """
    # CARGAMOS LLM — configure the Groq chat model.
    # SECURITY: a live API key is hard-coded and committed to the repo.
    # Rotate this key and load it from st.secrets / an env variable instead.
    os.environ["GROQ_API_KEY"] = "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"
    model = 'llama3-8b-8192'
    llm = ChatGroq(model=model)

    # BUG FIX: `embedding` was referenced here but never defined in this
    # scope (NameError at runtime after the refactor into functions).
    if embedding is None:
        embedding = read()
    if embedding is None:
        # Fallback for read() implementations that cache the model without
        # returning it: build the embedding directly.
        embedding = HuggingFaceEmbeddings(model_name='intfloat/multilingual-e5-base')

    # CARGAMOS EL VECTORSTORE DE PINECONE — connect to the existing index.
    # SECURITY: same issue — do not commit this key; rotate it.
    os.environ["PINECONE_API_KEY"] = '4bf0b4cf-4ced-4f70-8977-d60bb8ae405a'
    index_name = "boe-intfloat-multilingual-e5-base"
    namespace = "trebep"
    vectorstore = PineconeVectorStore(index_name=index_name, namespace=namespace, embedding=embedding)

    # BUG FIX: return the objects so the retrieval step can use them.
    return llm, vectorstore
|
33 |
+
|
34 |
+
def ask()
|
35 |
# CREAMOS EL RETRIEVAL
|
36 |
qa = RetrievalQA.from_chain_type(
|
37 |
llm=llm,
|
|
|
64 |
if user_question:

    # Inicializar entorno — (re)initialise the LLM, embeddings and vector
    # store for this question.
    # NOTE(review): read() and setup() currently return nothing, so the
    # embedding / llm / vectorstore they build are not captured here and
    # are lost when the functions return — confirm this is intended.
    # NOTE(review): presumably ask() returns the RetrievalQA chain (its
    # definition is cut off in this diff) — verify it has a `return qa`.
    read()
    setup()
    qa = ask()

    # Obtenemos la respuesta — run the retrieval-QA chain on the question.
    respuesta = qa.invoke(user_question)
|