Hugging Face Spaces — Space status: Runtime error.
Commit: "Update app.py" (Browse files)
File changed: app.py
@@ -10,6 +10,13 @@ EMBEDDING_MODEL = "text-embedding-ada-002"
|
|
10 |
|
11 |
openai.api_key = os.getenv("OPENAI_API_KEY")
|
12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
# 1) Preprocess the document library
|
14 |
df = pd.read_csv("informacion_neo_tokenizado.csv")
|
15 |
df = df.set_index(["title", "heading"])
|
@@ -84,8 +91,9 @@ def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame)
|
|
84 |
|
85 |
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
|
86 |
chosen_sections_indexes.append(str(section_index))
|
87 |
-
|
88 |
-
header = """
|
|
|
89 |
|
90 |
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
|
91 |
|
@@ -125,5 +133,15 @@ def answer_query_with_context(
|
|
125 |
def answer_question(query):
|
126 |
return answer_query_with_context(query, df, document_embeddings)
|
127 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
128 |
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
|
129 |
iface.launch()
|
|
|
# OpenAI credentials come from the environment so the key is never
# committed with the source.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Turn markers for assembling a chat-style transcript prompt.
# NOTE(review): these look intended for the chat path (chatgpt_clone);
# they appear unused by the QA path below — confirm before removing.
start_sequence = "\nAI:"
restart_sequence = "\nHuman: "

# 1) Preprocess the document library
# Load the corpus (pre-tokenized, per the filename — TODO confirm) and
# index it by (title, heading) so individual sections can be looked up
# when building prompts.
df = pd.read_csv("informacion_neo_tokenizado.csv")
df = df.set_index(["title", "heading"])
|
|
|
91 |
|
92 |
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
|
93 |
chosen_sections_indexes.append(str(section_index))
|
94 |
+
|
95 |
+
header = """Responde la pregunta con la mayor sinceridad posible utilizando primero el contexto proporcionado"\n\nContexto:\n"""
|
96 |
+
#header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
|
97 |
|
98 |
return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
|
99 |
|
|
|
133 |
def answer_question(query):
    """Gradio entry point: answer *query* from the preloaded corpus.

    Delegates to answer_query_with_context using the module-level
    DataFrame and its precomputed document embeddings.
    """
    answer = answer_query_with_context(query, df, document_embeddings)
    return answer
|
135 |
|
136 |
+
|
137 |
+
def chatgpt_clone(input, history):
    """Run one chat turn against the OpenAI completion helper.

    Parameters
    ----------
    input : str
        The new human message.
    history : list[tuple[str, str]] | None
        Prior (human, ai) exchanges; falsy on the first turn.

    Returns
    -------
    tuple[list, list]
        The updated history twice — the (display, state) pair Gradio
        chat callbacks expect.
    """
    # NOTE(review): `input` shadows the builtin; kept so the positional
    # signature stays byte-compatible with existing callers.
    history = history or []
    # Flatten [(human, ai), ...] into one flat list of utterances.
    # A comprehension replaces the original sum(history, ()), which is
    # quadratic in the number of turns.
    utterances = [text for exchange in history for text in exchange]
    utterances.append(input)
    prompt = ' '.join(utterances)
    # presumably openai_create returns the model's reply text — confirm
    output = openai_create(prompt)
    history.append((input, output))
    return history, history
|
145 |
+
|
146 |
# 2) Expose the question-answering function through a minimal Gradio UI:
# one text box in, one text box out. launch() starts the web server and
# blocks for the lifetime of the app.
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
iface.launch()
|