end to end working
buster/chatbot.py
CHANGED
@@ -4,12 +4,14 @@ import pickle
 import numpy as np
 import openai
 import pandas as pd
-from docparser import EMBEDDING_MODEL
+from buster.docparser import EMBEDDING_MODEL
 from openai.embeddings_utils import cosine_similarity, get_embedding
 
+
 logger = logging.getLogger(__name__)
 logging.basicConfig(level=logging.INFO)
 
+
 # search through the reviews for a specific product
 def rank_documents(df: pd.DataFrame, query: str, top_k: int = 3) -> pd.DataFrame:
     product_embedding = get_embedding(
@@ -33,7 +35,7 @@ def engineer_prompt(question: str, documents: list[str]) -> str:
 def get_gpt_response(question: str, df) -> str:
     # rank the documents, get the highest scoring doc and generate the prompt
     candidates = rank_documents(df, query=question, top_k=1)
-    documents = candidates.
+    documents = candidates.text.to_list()
     prompt = engineer_prompt(question, documents)
 
     logger.info(f"querying GPT...")
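The body of rank_documents falls outside these hunks, but the imports (get_embedding, cosine_similarity) and the new candidates.text.to_list() call point at the standard embedding-retrieval pattern. A minimal sketch of how such a ranking plausibly works, assuming the DataFrame carries text and embedding columns (column names inferred from the rest of this diff, not confirmed by it):

import pandas as pd
from openai.embeddings_utils import cosine_similarity, get_embedding

from buster.docparser import EMBEDDING_MODEL

def rank_documents(df: pd.DataFrame, query: str, top_k: int = 3) -> pd.DataFrame:
    # Embed the query with the same model used for the precomputed document embeddings.
    product_embedding = get_embedding(query, engine=EMBEDDING_MODEL)
    # Score each document by cosine similarity between its embedding and the query's.
    df["similarity"] = df.embedding.apply(lambda e: cosine_similarity(e, product_embedding))
    # Return the top_k most similar documents, best first.
    return df.sort_values("similarity", ascending=False).head(top_k)

With top_k=1, as in get_gpt_response, candidates.text.to_list() then yields a single-element list for engineer_prompt.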
buster/data/document_embeddings.csv
CHANGED
The diff for this file is too large to render; see the raw diff.
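A note on the regenerated document_embeddings.csv: write_documents serializes the DataFrame with to_csv, so the list-valued embedding column is stored as its string representation. Any consumer loading the CSV has to parse it back before doing similarity math. A common pattern for this (assumed here; the loading code is not part of this diff):

import numpy as np
import pandas as pd

df = pd.read_csv("buster/data/document_embeddings.csv")
# to_csv stores each embedding as a string like "[0.0123, -0.0456, ...]";
# parse it back into a list, then into a numpy array.
df["embedding"] = df.embedding.apply(eval).apply(np.array)

ast.literal_eval is a safer drop-in for eval if the CSV is not fully trusted.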
buster/data/{sections.pkl → documents.csv}
RENAMED
Binary files a/buster/data/sections.pkl and b/buster/data/documents.csv differ
buster/docparser.py
CHANGED
@@ -5,7 +5,7 @@ import os
 import pandas as pd
 import tiktoken
 from bs4 import BeautifulSoup
-from openai.embeddings_utils import
+from openai.embeddings_utils import get_embedding
 
 
 EMBEDDING_MODEL = "text-embedding-ada-002"
@@ -90,7 +90,7 @@ def get_all_documents(root_dir: str, max_section_length: int = 3000) -> pd.DataFrame:
 
 
 def write_documents(filepath: str, documents_df: pd.DataFrame):
-    documents_df.to_csv(filepath)
+    documents_df.to_csv(filepath, index=False)
 
 
 def read_documents(filepath: str) -> pd.DataFrame:
@@ -99,27 +99,27 @@ def read_documents(filepath: str) -> pd.DataFrame:
 
 def compute_n_tokens(df: pd.DataFrame) -> pd.DataFrame:
     encoding = tiktoken.get_encoding(EMBEDDING_ENCODING)
-    df["n_tokens"] = df.
+    df["n_tokens"] = df.text.apply(lambda x: len(encoding.encode(x)))
     return df
 
 
 def precompute_embeddings(df: pd.DataFrame) -> pd.DataFrame:
-    df["embedding"] = df.
+    df["embedding"] = df.text.apply(lambda x: get_embedding(x, engine=EMBEDDING_MODEL))
     return df
 
 
 def generate_embeddings(filepath: str, output_csv: str) -> pd.DataFrame:
     # Get all documents and precompute their embeddings
-    df = read_documents(filepath)
+    df = read_documents(filepath)
     df = compute_n_tokens(df)
     df = precompute_embeddings(df)
-
+    write_documents(output_csv, df)
     return df
 
 
 if __name__ == "__main__":
     root_dir = "/home/hadrien/perso/mila-docs/output/"
-    save_filepath =
+    save_filepath = "data/documents.csv"
 
     # How to write
     documents_df = get_all_documents(root_dir)
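Taken together, the __main__ block now wires up the whole pipeline: parse the rendered docs into sections, persist them as CSV, then token-count and embed each section. A sketch of the end-to-end flow under this commit; the embeddings output path is an assumption (only data/documents.csv appears in the diff, though the repo tracks buster/data/document_embeddings.csv):

from buster.docparser import generate_embeddings, get_all_documents, write_documents

root_dir = "/home/hadrien/perso/mila-docs/output/"
save_filepath = "data/documents.csv"

# Parse the docs under root_dir into bounded-length text sections.
documents_df = get_all_documents(root_dir)
# Persist the raw sections as CSV (replaces the old sections.pkl pickle).
write_documents(save_filepath, documents_df)

# Read them back, add n_tokens via tiktoken, embed each section with
# text-embedding-ada-002, and write the result out for the chatbot.
df = generate_embeddings(save_filepath, output_csv="data/document_embeddings.csv")

The pickle-to-CSV switch (sections.pkl → documents.csv) keeps the stored documents human-readable and diffable, at the cost of the embedding string-parsing step noted above.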