NealCaren committed
Commit
ae69701
0 Parent(s):

Duplicate from NealCaren/DYF

Files changed (6)
  1. .DS_Store +0 -0
  2. .gitattributes +36 -0
  3. README.md +14 -0
  4. app.py +206 -0
  5. dyf_w_embeddings.json +3 -0
  6. requirements.txt +4 -0
.DS_Store ADDED
Binary file (6.15 kB)
 
.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ dyf_w_embeddings.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: DYF
+ emoji: ⚡
+ colorFrom: purple
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.39.0
+ app_file: app.py
+ pinned: false
+ license: cc-by-nc-4.0
+ duplicated_from: NealCaren/DYF
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,206 @@
+ import pandas as pd
+ from glob import glob
+ from scipy import spatial
+ from collections import defaultdict
+
+ import tiktoken
+ import openai
+ import gradio as gr
+ from tenacity import retry, stop_after_attempt, wait_random_exponential
+
+
+
+ #df = pd.read_json('https://www.dropbox.com/scl/fi/uh964d1k6woc9wo3l2slc/dyf_w_embeddings.json?rlkey=j23j5338n4e88kvvsmj7s7aff&dl=1')
+ df = pd.read_json('dyf_w_embeddings.json')
+
+
+ GPT_MODEL = 'gpt-3.5-turbo'
+ EMBEDDING_MODEL = "text-embedding-ada-002"
+
+ # search function
+ def strings_ranked_by_relatedness(
+     query: str,
+     df: pd.DataFrame,
+     relatedness_fn=lambda x, y: 1 - spatial.distance.cosine(x, y),
+     top_n: int = 25
+ ) -> tuple[list[str], list[float]]:
+     """Returns a list of strings and relatednesses, sorted from most related to least."""
+     query_embedding_response = openai.Embedding.create(
+         model=EMBEDDING_MODEL,
+         input=query,
+     )
+     query_embedding = query_embedding_response["data"][0]["embedding"]
+     strings_and_relatednesses = [
+         (row["citation"]+':\n'+row["text"]+'\nINDEX:'+str(i), relatedness_fn(query_embedding, row["embedding"]))
+         for i, row in df.iterrows()
+     ]
+     strings_and_relatednesses.sort(key=lambda x: x[1], reverse=True)
+     strings, relatednesses = zip(*strings_and_relatednesses)
+     return strings[:top_n], relatednesses[:top_n]
+
+ def num_tokens(text: str, model: str = GPT_MODEL) -> int:
+     """Return the number of tokens in a string."""
+     encoding = tiktoken.encoding_for_model(model)
+     return len(encoding.encode(text))
+
+ def double_check(question, passage):
+
+     message = f'Possibly related text:{passage}\n\nSearch query: {question}'
+     messages = [
+         {"role": "system", "content": "Is the following text topically related to the search query? Answer with just Yes or No."},
+         {"role": "user", "content": message},
+     ]
+     response = openai.ChatCompletion.create(
+         model='gpt-3.5-turbo',
+         messages=messages,
+         temperature=0
+     )
+     response_message = response["choices"][0]["message"]["content"]
+     if 'yes' in response_message.lower():
+         return True
+     return False
+
+ def extract_numbers_after_index(text):
+     numbers = []
+     lines = text.split("\n")
+
+     for line in lines:
+         if "INDEX:" in line:
+             index = line.split("INDEX:")[1].strip()
+             try:
+                 number = int(index)
+                 numbers.append(number)
+             except ValueError:
+                 pass
+
+     return numbers
+
+
+ def query_message(
+     query: str,
+     df: pd.DataFrame,
+     model: str,
+     token_budget: int
+ ) -> str:
+     """Return a message for GPT, with relevant source texts pulled from a dataframe."""
+     strings, relatednesses = strings_ranked_by_relatedness(query, df)
+     introduction = 'Use the below articles written by W.E.B. Du Bois to answer the subsequent question. Write your response in the form of a four-paragraph essay for a college class. If the answer cannot be found in the articles, write "I could not find an answer." Be sure to put direct quotations in quotation marks. Use APA-style in-text references where appropriate.'
+     message = introduction
+     article_cites = defaultdict(int)
+
+     for counter, string in enumerate(strings):
+         article_cite = string.splitlines()[0]
+         next_article = f'\n\nDu Bois article:\n"""\n{string}\n"""'
+         if (
+             num_tokens(message + next_article + query, model=model)
+             > token_budget
+         ):
+             break
+         else:
+             if double_check(query, string) and article_cites[article_cite] <= 2:
+                 message += next_article
+                 article_cites[article_cite] += 1
+     print(article_cites)
+     return message + query
+
+ def remove_lines_with_index(input_string):
+     lines = input_string.strip().split('\n')
+     cleaned_lines = [line for line in lines if "INDEX:" not in line]
+     cleaned_string = "\n".join(cleaned_lines)
+     return cleaned_string
+
+ def ask(
+     query: str,
+
+ ) -> str:
+     """Answers a query using GPT and a dataframe of relevant texts and embeddings."""
+
+     model = GPT_MODEL
+     token_budget = 4096 - 600
+
+
+     message = query_message(query, df, model=model, token_budget=token_budget)
+
+     # Add references
+     cite_rows = extract_numbers_after_index(message)
+     used_df = df[df.index.isin(cite_rows)].copy()
+     citations = list(set(used_df['citation'].values))
+     if len(citations) == 0:
+         return "No relevant articles found. Sorry. Please try a different question."
+
+     resources = '**Resources**\n* ' + '\n* '.join(sorted(citations))
+     # clean up to remove index
+     message = remove_lines_with_index(message)
+
+
+     messages = [
+         {"role": "system", "content": "You answer questions based on the writings of W.E.B. Du Bois. All the provided texts are written by Du Bois."},
+         {"role": "user", "content": message},
+     ]
+     response = openai.ChatCompletion.create(
+         model=model,
+         messages=messages,
+         temperature=0
+     )
+     response_message = response["choices"][0]["message"]["content"]
+
+
+
+     answer = f'{resources}\n\n**Summary**\n\n{response_message}'
+     return answer
+
+
+
+ intro_text = '''
+ # W.E.B. Du Bois in the Crisis
+
+ This search engine finds the most relevant articles from [Dare You Fight](https://www.dareyoufight.org), an online repository of W.E.B. Du Bois's writings in The Crisis, the official journal of the NAACP, which Du Bois founded and edited between 1911 and 1934. In addition to locating the most relevant articles, it also produces a short essay in response to your question.
+
+ **Notes:**
+ * Avoid using "Du Bois" in the question, as this information is passed along behind the scenes.
+ * Searches can take 20 to 40 seconds.
+ * You may need to ask a follow-up question if your original question is only a word or two.
+ * The model usually looks at five or fewer relevant articles, so if your response requires more, consider refining and splitting up your question.
+
+ **Caveats:** Like all apps that employ large language models, this one has the possibility of bias and confabulation. Please refer to the original articles.
+
+ '''
+
+ outro_text = '''
+ **Behind the Scenes**
+
+ This app uses sentence embeddings and a large language model to craft the response. Behind the scenes, it involves the following steps:
+
+ 1. Each article from Dare You Fight (or segment of the article if it's long) is converted into a fixed-length vector representation using OpenAI's text-embedding-ada-002 model. These representations are stored in a dataframe.
+ 2. The user's query is embedded using the same text-embedding-ada-002 model to convert it into a fixed-length vector representation.
+ 3. To find the most relevant articles to the query, cosine similarity is calculated between the query vector and all the article vectors. The articles with the highest cosine similarity are retrieved as the top matches.
+ 4. The text of each of the possibly related articles (based on Step 3) is passed to OpenAI's ChatGPT 3.5 model, along with a question asking whether the text is relevant to the search query. Only texts coded as relevant are used in subsequent steps.
+ 5. All of the relevant texts (from Step 4), along with the original search query, are passed to OpenAI's ChatGPT 3.5 model with specific instructions to answer the query in the form of a college essay using only the supplied texts.
+ '''
+
+
+
+ block = gr.Blocks(theme = 'bethecloud/storj_theme')
+
+ with block:
+     gr.Markdown(intro_text)
+
+     # Define the input and output blocks
+     input_block = gr.Textbox(label='Question')
+     research_btn = gr.Button(value="Ask the archive")
+     output_block = gr.Markdown(label="Response")
+     research_btn.click(ask, inputs=input_block, outputs=output_block)
+     gr.Examples(["What is the relationship between social, political and economic equality?",
+                  "What is Pan-Africanism?",
+                  "Did Du Bois support American involvement in WWI?",
+                  "What are the most effective tactics or methods for racial equality?",
+                  "Why was the NAACP founded and what were its original goals?"], inputs=[input_block])
+     gr.Markdown(outro_text)
+
+ # Launch the interface
+ block.launch()
+
+
+
+
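
For reference, the retrieval described in steps 2 and 3 of the app's "Behind the Scenes" notes comes down to embedding the query and ranking article rows by cosine similarity. Below is a minimal, self-contained sketch of that step. It reuses the same legacy `openai.Embedding.create` call and `scipy` distance function that app.py uses, but the `articles` dataframe, its placeholder row, and the `rank_by_similarity` helper are hypothetical stand-ins for illustration only; running it requires an OpenAI API key and the pre-1.0 `openai` package.

```python
import openai
import pandas as pd
from scipy import spatial

# Hypothetical stand-in for dyf_w_embeddings.json: one row per article segment,
# each with a precomputed 1536-dimensional "embedding" (placeholder values here).
articles = pd.DataFrame({
    "citation": ["Example citation"],
    "text": ["Example article text."],
    "embedding": [[0.01] * 1536],
})

def rank_by_similarity(query: str, df: pd.DataFrame, top_n: int = 25):
    """Embed the query, then rank rows by cosine similarity to it (steps 2-3 above)."""
    response = openai.Embedding.create(model="text-embedding-ada-002", input=query)
    query_embedding = response["data"][0]["embedding"]
    scored = [
        (row["citation"], 1 - spatial.distance.cosine(query_embedding, row["embedding"]))
        for _, row in df.iterrows()
    ]
    # Highest similarity first; app.py's strings_ranked_by_relatedness() does the same
    # before the double_check() relevance filter and essay-generation steps.
    return sorted(scored, key=lambda pair: pair[1], reverse=True)[:top_n]
```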
dyf_w_embeddings.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19e11cdb2873c39c9065973a98f3ca436f507935074c8c1af1d6ef82481ebe36
+ size 23555987
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ scipy
+ tiktoken
+ openai
+ tenacity