Jhoeel Luna committed
Commit fc870a0 (0 parents)

Duplicate from NeoConsulting/Jarvis_QuAn_v01

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ olympics_sections_document_embeddings.csv filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,15 @@
+ ---
+ title: Jarvis QuAn v01
+ emoji: 🌖
+ colorFrom: gray
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.19.1
+ python_version: 3.11.2
+ app_file: app.py
+ pinned: false
+ license: openrail
+ duplicated_from: NeoConsulting/Jarvis_QuAn_v01
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,145 @@
+ import os
+ import numpy as np
+ import openai
+ import pandas as pd
+ import tiktoken
+ import gradio as gr
+
+ COMPLETIONS_MODEL = "text-davinci-003"
+ EMBEDDING_MODEL = "text-embedding-ada-002"
+
+ openai.api_key = os.getenv("OPENAI_API_KEY")
+
+ # 1) Preprocess the document library
+ df = pd.read_csv("olympics_sections_text.csv")
+ df = df.set_index(["title", "heading"])
+
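+ # Each section row, keyed by (title, heading), carries the section text in
+ # `content` and a precomputed `tokens` count; both are used in construct_prompt below.
+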
+ def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
+     result = openai.Embedding.create(
+         model=model,
+         input=text
+     )
+     return result["data"][0]["embedding"]
+
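+ # Each call returns one embedding vector; for text-embedding-ada-002 the
+ # vector has 1536 dimensions.
+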
+ # Uncomment the lines below to calculate the embeddings from scratch. ========
+
+ # def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
+ #     return {
+ #         idx: get_embedding(r.content) for idx, r in df.iterrows()
+ #     }
+
+ # document_embeddings = compute_doc_embeddings(df)
+
+ def load_embeddings(fname: str) -> dict[tuple[str, str], list[float]]:
+     """
+     Read the document embeddings and their keys from a CSV.
+
+     fname is the path to a CSV with exactly these named columns:
+     "title", "heading", "0", "1", ... up to the length of the embedding vectors.
+     """
+     df = pd.read_csv(fname, header=0)
+     max_dim = max([int(c) for c in df.columns if c != "title" and c != "heading"])
+     return {
+         (r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
+     }
+
+ document_embeddings = load_embeddings("olympics_sections_document_embeddings.csv")
+
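+ # document_embeddings maps (title, heading) -> embedding vector, e.g.
+ # ("Article title", "Summary") -> [-0.0026, 0.0134, ...]  (illustrative key and values)
+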
+ # 2) Find the most similar document embeddings to the question embedding
+
+ def vector_similarity(x: list[float], y: list[float]) -> float:
+     """
+     Returns the similarity between two vectors.
+
+     Because OpenAI embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
+     """
+     return np.dot(np.array(x), np.array(y))
+
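+ # Illustrative check: x = [0.6, 0.8] and y = [1.0, 0.0] are unit vectors, and
+ # vector_similarity(x, y) returns 0.6, the cosine of the angle between them.
+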
+ def order_document_sections_by_query_similarity(query: str, contexts: dict[tuple[str, str], list[float]]) -> list[tuple[float, tuple[str, str]]]:
+     """
+     Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
+     to find the most relevant sections.
+
+     Return the list of document sections, sorted by relevance in descending order.
+     """
+     query_embedding = get_embedding(query)
+
+     document_similarities = sorted([
+         (vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
+     ], reverse=True)
+
+     return document_similarities
+
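+ # Note: sorted() compares the (score, index) tuples element-wise, so equal
+ # scores fall back to comparing the (title, heading) keys.
+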
+ # 3) Add the most relevant document sections to the query prompt
+
+ MAX_SECTION_LEN = 500
+ SEPARATOR = "\n* "
+ ENCODING = "gpt2"  # encoding for text-davinci-003
+
+ encoding = tiktoken.get_encoding(ENCODING)
+ separator_len = len(encoding.encode(SEPARATOR))
+
+ def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
+     """
+     Fetch the most relevant document sections for the question and pack them
+     into a prompt, staying within the MAX_SECTION_LEN token budget.
+     """
+     most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
+
+     chosen_sections = []
+     chosen_sections_len = 0
+     chosen_sections_indexes = []
+
+     for _, section_index in most_relevant_document_sections:
+         # Add contexts until we run out of space.
+         document_section = df.loc[section_index]
+
+         chosen_sections_len += document_section.tokens + separator_len
+         if chosen_sections_len > MAX_SECTION_LEN:
+             break
+
+         chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
+         chosen_sections_indexes.append(str(section_index))
+
+     header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
+
+     return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
+
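+ # Example prompt built once at startup (a smoke test; its value is not reused
+ # by the Gradio handler below).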
+ prompt = construct_prompt(
+     "Who won the 2020 Summer Olympics men's high jump?",
+     document_embeddings,
+     df
+ )
+
+ # 4) Answer the user's question based on the context.
+
+ COMPLETIONS_API_PARAMS = {
+     # We use temperature of 0.0 because it gives the most predictable, factual answer.
+     "temperature": 0.0,
+     "max_tokens": 300,
+     "model": COMPLETIONS_MODEL,
+ }
+
+ def answer_query_with_context(
+     query: str,
+     df: pd.DataFrame,
+     document_embeddings: dict[tuple[str, str], list[float]]
+ ) -> str:
+     prompt = construct_prompt(
+         query,
+         document_embeddings,
+         df
+     )
+
+     response = openai.Completion.create(
+         prompt=prompt,
+         **COMPLETIONS_API_PARAMS
+     )
+
+     return response["choices"][0]["text"].strip(" \n")
+
+ def answer_question(query):
+     return answer_query_with_context(query, df, document_embeddings)
+
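+ # Expose the QA function as a minimal Gradio app: one text input, one text output.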
+ iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
+ iface.launch()
olympics_sections_document_embeddings.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdf6cfaa80e7db13902fbd8064942ceb7ad4861aa8bc0348d5edd89bf7f971f3
+ size 118432915
olympics_sections_text.csv ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ openai
+ tiktoken