Rams901 committed on
Commit
5f90b6d
1 Parent(s): 734db66

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -78
app.py CHANGED
@@ -27,7 +27,7 @@ embeddings = HuggingFaceEmbeddings()
27
  db = FAISS.load_local('db_full', embeddings)
28
 
29
  mp_docs = {}
30
- llm = ClaudeLLM()
31
  # ChatOpenAI(
32
  # temperature=0,
33
  # model='gpt-3.5-turbo-16k'
@@ -41,7 +41,7 @@ def add_text(history, text):
41
 
42
  return history, ""
43
 
44
- pipeline = {'claude': (ClaudeLLM(), 0), 'gpt-3.5': (ChatOpenAI(temperature=0,model='gpt-3.5-turbo-16k'), 65), 'gpt-4': (ChatOpenAI(temperature=0, model='gpt-4'), 30)}
45
 
46
  def retrieve_thoughts(query, n):
47
 
@@ -95,84 +95,14 @@ def qa_retrieve(query, llm):
95
 
96
  tier_1 = thoughts['tier 1']
97
  tier_2 = thoughts['tier 2']
98
-
99
- reference = tier_1[['ref', 'url', 'title']].to_dict('records')
100
-
101
- tier_1 = list(tier_1.apply(lambda x: f"[{int(x['ref'])}] title: {x['title']}\n Content: {x.content}", axis = 1).values)
102
- print(len(tier_1))
103
- tier_2 = list(tier_2.apply(lambda x: f"title: {x['title']}\n Content: {x.content}", axis = 1).values)
104
-
105
- print(f"QUERY: {query}\nTIER 1: {tier_1}\nTIER2: {tier_2}")
106
- # print(f"DOCS RETRIEVED: {mp_docs.values}")
107
-
108
- # Cynthesis Generation
109
- session_prompt = """ A bot that is open to discussions about different cultural, philosophical and political exchanges. You will use do different analysis to the articles provided to me. Stay truthful and if you weren't provided any resources give your oppinion only."""
110
- task = """Your primary responsibility is to identify multiple themes from the given articles. For each theme detected, you are to present it under three separate categories:
111
-
112
- 1. Theme Title - An easy-to-understand title that encapsulates the core idea of the theme extracted from the article.
113
-
114
- 2. Theme Description - An expanded elaboration that explores the theme in detail based on the arguments and points provided in the article.
115
-
116
- 3. Quotes related to theme - Locate and provide at least one compelling quote from the article that directly supports or showcases the theme you have identified. This quote should serve as a specific evidence or example from the article text that corresponds directly to the developed theme.
117
 
118
- The extracted themes should be written in structured manner, ensuring clarity and meaningful correlation between the themes and the articles. Make sure your analysis is rooted in the arguments given in the article. Avoid including personal opinions or making generalizations that are not explicitly supported by the articles. """
119
-
120
-
121
- prompt = PromptTemplate(
122
- input_variables=["query", "task", "session_prompt", "articles"],
123
- template="""
124
- You are a {session_prompt}
125
- {task}
126
-
127
- query: {query}
128
-
129
- Articles:
130
- {articles}
131
-
132
-
133
- The extracted themes should be written in structured manner, ensuring clarity and meaningful correlation between the themes and the articles. Make sure your analysis is rooted in the arguments given in the article. Avoid including personal opinions or making generalizations that are not explicitly supported by the articles.
134
-
135
- """,
136
- )
137
-
138
-
139
- # llm = BardLLM()
140
- chain = LLMChain(llm=llm, prompt = prompt)
141
-
142
- response = chain.run(query=query, articles="\n".join(tier_1), session_prompt = session_prompt, task = task)
143
-
144
- for i in range(5):
145
- response = response.replace(f'[{i}]', f"<span class='text-primary'>[{i}]</span>")
146
-
147
- # Generate related questions
148
- prompt_q = PromptTemplate(
149
- input_variables=[ "session_prompt", "articles"],
150
- template="""
151
- You are a {session_prompt}
152
- Give general/global questions related the following articles:
153
-
154
- Articles:
155
- {articles}
156
-
157
-
158
- Make sure not to ask specific questions, keep them general, short and concise.
159
- """,
160
- )
161
-
162
- chain_q = LLMChain(llm=ClaudeLLM(), prompt = prompt_q)
163
-
164
- questions = chain_q.run(session_prompt = session_prompt, articles = "\n".join(tier_2), )
165
- print(questions)
166
- questions = questions[questions.index('1'):]
167
-
168
- questions = [ remove_numbers(t).strip() for (i, t) in enumerate(questions.split('.')) if len(t) > 5][:5]
169
- print(questions)
170
-
171
- # TO-DO: initiate models in another function, refactor code to be reusable
172
 
173
- # json_resp = {'cynthesis': response, 'questions': questions, 'Reference': reference}
 
 
174
 
175
- return response, {'Reference': reference}
176
 
177
  def flush():
178
  return None
@@ -184,7 +114,7 @@ examples = [
184
 
185
  demo = gr.Interface(fn=qa_retrieve, title="cicero-qa-api",
186
  inputs=gr.inputs.Textbox(lines=5, label="what would you like to learn about?"),
187
- outputs=[gr.components.Textbox(lines=3, label="Themes"),
188
  gr.components.JSON( label="Reference")],examples=examples)
189
 
190
  demo.queue(concurrency_count = 4)
 
27
  db = FAISS.load_local('db_full', embeddings)
28
 
29
  mp_docs = {}
30
+ # llm = ClaudeLLM()
31
  # ChatOpenAI(
32
  # temperature=0,
33
  # model='gpt-3.5-turbo-16k'
 
41
 
42
  return history, ""
43
 
44
+ # pipeline = {'claude': (ClaudeLLM(), 0), 'gpt-3.5': (ChatOpenAI(temperature=0,model='gpt-3.5-turbo-16k'), 65), 'gpt-4': (ChatOpenAI(temperature=0, model='gpt-4'), 30)}
45
 
46
  def retrieve_thoughts(query, n):
47
 
 
95
 
96
  tier_1 = thoughts['tier 1']
97
  tier_2 = thoughts['tier 2']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
 
99
+ reference = tier_1[['ref', 'url', 'title', 'content','score']].to_dict('records')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
+ # tier_1 = list(tier_1.apply(lambda x: f"[{int(x['ref'])}] title: {x['title']}\n Content: {x.content}", axis = 1).values)
102
+ # print(len(tier_1))
103
+ # tier_2 = list(tier_2.apply(lambda x: f"title: {x['title']}\n Content: {x.content}", axis = 1).values)
104
 
105
+ return {'Reference': reference}
106
 
107
  def flush():
108
  return None
 
114
 
115
  demo = gr.Interface(fn=qa_retrieve, title="cicero-qa-api",
116
  inputs=gr.inputs.Textbox(lines=5, label="what would you like to learn about?"),
117
+ outputs=[
118
  gr.components.JSON( label="Reference")],examples=examples)
119
 
120
  demo.queue(concurrency_count = 4)