Vlad Severin committed on
Commit 7550f47
1 Parent(s): 75ac2c1

add app.py

.gitignore ADDED
@@ -0,0 +1 @@
+ __pycache__
app.py ADDED
@@ -0,0 +1,130 @@
+ import openai
+ import tiktoken
+ import numpy as np
+ import pandas as pd
+ import gradio as gr
+ import pickle
+
+ COMPLETIONS_MODEL = "text-davinci-003"
+ EMBEDDING_MODEL = "text-embedding-ada-002"
+
+ # Pre-computed embeddings for every (title, section) of the book.
+ with open("document_embeddings_clean_code.pickle", "rb") as document_embeddings_clean_code:
+     document_embeddings = pickle.load(document_embeddings_clean_code)
+
+ # Processed book sections; drop very short ones (40 tokens or fewer).
+ df = pd.read_csv('./clean_code_processed.csv')
+ df = df.set_index(["title", "section"])
+ df = df[df.tokens > 40]
+ df_for_embeddings = df
+
+
+ def get_embedding(text, model=EMBEDDING_MODEL):
+     result = openai.Embedding.create(
+         model=model,
+         input=text
+     )
+     return result["data"][0]["embedding"]
+
+ def vector_similarity(x, y):
+     """
+     Returns the similarity between two vectors.
+
+     Because OpenAI embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
+     """
+     return np.dot(np.array(x), np.array(y))
+
+ def order_document_sections_by_query_similarity(query, contexts):
+     """
+     Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
+     to find the most relevant sections.
+
+     Return the list of document sections, sorted by relevance in descending order.
+     """
+     query_embedding = get_embedding(query)
+
+     document_similarities = sorted([
+         (vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
+     ], reverse=True)
+
+     return document_similarities
+
+ def construct_prompt(question, context_embeddings, df):
+     """
+     Fetch the most relevant document sections and assemble them, together with the question, into a prompt for the completions model.
+     """
+     most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
+
+     chosen_sections = []
+     chosen_sections_len = 0
+     chosen_sections_indexes = []
+
+     for _, section_index in most_relevant_document_sections:
+         # Add contexts until we run out of space.
+         tokens = df._get_value(section_index, "tokens")
+         # A duplicated (title, section) label yields a Series instead of a scalar; skip those rows.
+         if not isinstance(tokens, np.integer):
+             continue
+
+         chosen_sections_len += tokens + separator_len
+         if chosen_sections_len > MAX_SECTION_LEN:
+             break
+
+         chosen_sections.append(SEPARATOR + df._get_value(section_index, "content").replace("\n", " "))
+         chosen_sections_indexes.append(section_index)
+
+     # Useful diagnostic information
+     print(f"Selected {len(chosen_sections)} document sections:")
+     print("\n".join(str(index) for index in chosen_sections_indexes))
+
+     header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
+
+     return (header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:", chosen_sections_indexes)
+
+
+ # These constants are referenced inside construct_prompt; they only need to
+ # exist before its first call, so defining them here still works.
+ MAX_SECTION_LEN = 2000
+ SEPARATOR = "\n* "
+ ENCODING = "gpt2"  # a close approximation of the tokenizer used by text-davinci-003
+
+ encoding = tiktoken.get_encoding(ENCODING)
+ separator_len = len(encoding.encode(SEPARATOR))
+
+ COMPLETIONS_API_PARAMS = {
+     # We use a temperature of 0.0 because it gives the most predictable, factual answer.
+     "temperature": 0.0,
+     "max_tokens": 1500,
+     "model": COMPLETIONS_MODEL,
+ }
+
+ def answer_query_with_context(
+     query,
+     df,
+     document_embeddings
+ ):
+     prompt, chosen_sections_indexes = construct_prompt(
+         query,
+         document_embeddings,
+         df
+     )
+
+     # Keep only the chapter title from each (title, section) index.
+     chosen_sections_indexes = [index[0] for index in chosen_sections_indexes]
+
+     response = openai.Completion.create(
+         prompt=prompt,
+         **COMPLETIONS_API_PARAMS
+     )
+
+     return (response["choices"][0]["text"].strip(" \n"), chosen_sections_indexes)
+
+ def handle(question):
+     answer, related_documents = answer_query_with_context(question, df_for_embeddings, document_embeddings)
+     return answer + "\n\nRelated chapters:\n" + "\n".join(related_documents)
+
+ demo = gr.Interface(
+     fn=handle,
+     inputs="text",
+     outputs="text",
+     cache_examples=False,
+ )
+
+ demo.launch()
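A note on the retrieval step: the vector_similarity docstring relies on OpenAI embeddings coming back normalized to unit length, which is what makes the plain dot product equal to cosine similarity. A minimal self-contained sketch of that equivalence, using made-up vectors rather than real embeddings:

import numpy as np

# Two arbitrary vectors, scaled to unit length the way the embedding
# vectors already are when the API returns them.
x = np.array([3.0, 4.0])
x = x / np.linalg.norm(x)
y = np.array([1.0, 2.0])
y = y / np.linalg.norm(y)

cosine = np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))
assert np.isclose(np.dot(x, y), cosine)  # equal once both vectors are unit length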
clean_code_processed.csv ADDED
The diff for this file is too large to render. See raw diff
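The CSV is not rendered here, but from the way app.py consumes it, it evidently carries at least title, section, content and tokens columns, one row per book section. A quick way to confirm the schema locally, assuming the file has been checked out:

import pandas as pd

df = pd.read_csv("clean_code_processed.csv")
print(df.columns.tolist())  # expected to include: title, section, content, tokens
print(df.head())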
 
document_embeddings_clean_code.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:600168ec8f03d7f4c56ab5493d7ed71cb283cababcc0e24121b85b9512a41914
+ size 2216273
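These three lines are a Git LFS pointer, not the data itself; the real ~2.2 MB pickle lives in LFS storage. Judging by how app.py iterates contexts.items(), it presumably holds a dict mapping each (title, section) index to its ada-002 embedding (1536 floats). A quick inspection sketch under that assumption:

import pickle

with open("document_embeddings_clean_code.pickle", "rb") as f:
    document_embeddings = pickle.load(f)

# Expect keys like ('Chapter title', 'Section name') and 1536-element values.
index, embedding = next(iter(document_embeddings.items()))
print(index, len(embedding))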
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ openai
+ tiktoken
+ numpy
+ pandas
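One thing worth noting: app.py also imports gradio, which is absent from this list. On a Hugging Face Space built with the Gradio SDK the runtime presumably supplies it, but anyone running the app elsewhere would need to install it themselves (pip install gradio) or add it to requirements.txt.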