Sambhavnoobcoder committed
Commit ff461f9
1 Parent(s): 3346531

final version of app.py (#3)

- final version of app.py (d2dd8e35a9859871788c7ee22d6ef38dfaed80bd)

Files changed (1):
  1. app.py +40 -56

app.py CHANGED
@@ -6,26 +6,11 @@ from sentence_transformers import SentenceTransformer
from bs4 import BeautifulSoup
import gradio as gr

- # Configure Gemini API key
- gemini_api_secret_name = 'YOUR_GEMINI_API_KEY'
-
- from google.colab import userdata
-
- try:
-     GOOGLE_API_KEY = userdata.get(gemini_api_secret_name)
-     genai.configure(api_key=GOOGLE_API_KEY)
- except userdata.SecretNotFoundError as e:
-     print(f'Secret not found\n\nThis expects you to create a secret named {gemini_api_secret_name} in Colab\n\nVisit https://makersuite.google.com/app/apikey to create an API key\n\nStore that in the secrets section on the left side of the notebook (key icon)\n\nName the secret {gemini_api_secret_name}')
-     raise e
- except userdata.NotebookAccessError as e:
-     print(f'You need to grant this notebook access to the {gemini_api_secret_name} secret in order for the notebook to access Gemini on your behalf.')
-     raise e
- except Exception as e:
-     # unknown error
-     print(f"There was an unknown error. Ensure you have a secret {gemini_api_secret_name} stored in Colab and it's a valid key from https://makersuite.google.com/app/apikey")
-     raise e
-
- # Fetch lecture notes and model architectures
+ # Configure Gemini API key
+ GOOGLE_API_KEY = 'YOUR_GEMINI_API_KEY'  # Replace with your API key
+ genai.configure(api_key=GOOGLE_API_KEY)
+
+ # Fetch lecture notes and model architectures
def fetch_lecture_notes():
    lecture_urls = [
        "https://stanford-cs324.github.io/winter2022/lectures/introduction/",
@@ -43,7 +28,7 @@ def fetch_lecture_notes():
        print(f"Failed to fetch content from {url}, status code: {response.status_code}")
    return lecture_texts

- def fetch_model_architectures():
+ def fetch_model_architectures():
    url = "https://github.com/Hannibal046/Awesome-LLM#milestone-papers"
    response = requests.get(url)
    if response.status_code == 200:
@@ -53,7 +38,7 @@ def fetch_lecture_notes():
        print(f"Failed to fetch model architectures, status code: {response.status_code}")
    return "", url

- # Extract text from HTML content
+ # Extract text from HTML content
def extract_text_from_html(html_content):
    soup = BeautifulSoup(html_content, 'html.parser')
    for script in soup(["script", "style"]):
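The fetch-and-extract path above can be exercised on its own. A small sketch under the same dependencies (requests, beautifulsoup4), using one of the lecture URLs already listed in app.py; decompose() is one plausible way to drop the script/style nodes, roughly what extract_text_from_html() does:

```python
import requests
from bs4 import BeautifulSoup

html = requests.get("https://stanford-cs324.github.io/winter2022/lectures/introduction/").text
soup = BeautifulSoup(html, "html.parser")
for tag in soup(["script", "style"]):
    tag.decompose()  # remove non-content nodes before extracting text
text = soup.get_text(separator="\n", strip=True)
print(text[:300])
```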
@@ -61,110 +46,109 @@ def extract_text_from_html(html_content):
    text = soup.get_text(separator="\n", strip=True)
    return text

- # Generate embeddings using SentenceTransformers
+ # Generate embeddings using SentenceTransformers
def create_embeddings(texts, model):
    texts_only = [text for text, _ in texts]
    embeddings = model.encode(texts_only)
    return embeddings

- # Initialize FAISS index
+ # Initialize FAISS index
def initialize_faiss_index(embeddings):
    dimension = embeddings.shape[1]  # Assuming all embeddings have the same dimension
    index = faiss.IndexFlatL2(dimension)
    index.add(embeddings.astype('float32'))
    return index

- # Handle natural language queries
+ # Handle natural language queries
conversation_history = []

- def handle_query(query, faiss_index, embeddings_texts, model):
+ def handle_query(query, faiss_index, embeddings_texts, model):
    global conversation_history

-     query_embedding = model.encode([query]).astype('float32')
+     query_embedding = model.encode([query]).astype('float32')

-     # Search FAISS index
+     # Search FAISS index
    _, indices = faiss_index.search(query_embedding, 3)  # Retrieve top 3 results
    relevant_texts = [embeddings_texts[idx] for idx in indices[0]]

-     # Combine relevant texts and truncate if necessary
+     # Combine relevant texts and truncate if necessary
    combined_text = "\n".join([text for text, _ in relevant_texts])
    max_length = 500  # Adjust as necessary
    if len(combined_text) > max_length:
        combined_text = combined_text[:max_length] + "..."

-     # Generate a response using Gemini
+     # Generate a response using Gemini
    try:
        response = genai.generate_text(
            model="models/text-bison-001",
            prompt=f"Based on the following context:\n\n{combined_text}\n\nAnswer the following question: {query}",
            max_output_tokens=200
        )
-         generated_text = response.result
+         generated_text = response.result if response else "No response generated."
    except Exception as e:
        print(f"Error generating text: {e}")
        generated_text = "An error occurred while generating the response."

-     # Update conversation history
-     conversation_history.append(f"User: {query}")
-     conversation_history.append(f"System: {generated_text}")
+     # Update conversation history
+     conversation_history.append((query, generated_text))

-     # Extract sources
+     # Extract sources
    sources = [url for _, url in relevant_texts]

-     return generated_text, sources
+     return generated_text, sources

- def generate_concise_response(prompt, context):
+ def generate_concise_response(prompt, context):
    try:
        response = genai.generate_text(
            model="models/text-bison-001",
            prompt=f"{prompt}\n\nContext: {context}\n\nAnswer:",
            max_output_tokens=200
        )
-         return response.result
+         return response.result if response else "No response generated."
    except Exception as e:
        print(f"Error generating concise response: {e}")
        return "An error occurred while generating the concise response."

- # Main function to execute the pipeline
- def chatbot(message , history):
+ # Main function to execute the pipeline
+ def chatbot(message, history):
    lecture_notes = fetch_lecture_notes()
    model_architectures = fetch_model_architectures()

-     all_texts = lecture_notes + [model_architectures]
+     all_texts = lecture_notes + [model_architectures]

-     # Load the SentenceTransformers model
+     # Load the SentenceTransformers model
    embedding_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

-     embeddings = create_embeddings(all_texts, embedding_model)
+     embeddings = create_embeddings(all_texts, embedding_model)

-     # Initialize FAISS index
+     # Initialize FAISS index
    faiss_index = initialize_faiss_index(np.array(embeddings))

-
-     response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
+     response, sources = handle_query(message, faiss_index, all_texts, embedding_model)
    print("Query:", message)
    print("Response:", response)
    total_text = response
+
    if sources:
        print("Sources:", sources)
-         relevant_source = ""
-         for source in sources:
-             relevant_source += source +"\n"
-         total_text += "\n\nSources:\n" + relevant_source
-
+         relevant_source = "\n".join(sources)
+         total_text += f"\n\nSources:\n{relevant_source}"
    else:
        print("Sources: None of the provided sources were used.")
+
    print("----")

-     # Generate a concise and relevant summary using Gemini
+     # Generate a concise and relevant summary using Gemini
    prompt = "Summarize the user queries so far"
-     user_queries_summary = " ".join(message)
+     user_queries_summary = " ".join([msg[0] for msg in history] + [message])
    concise_response = generate_concise_response(prompt, user_queries_summary)
    print("Concise Response:")
    print(concise_response)
+
    return total_text

- iface = gr.ChatInterface(
+ # Create the Gradio interface
+ iface = gr.ChatInterface(
    chatbot,
    title="LLM Research Assistant",
    description="Ask questions about LLM architectures, datasets, and training techniques.",
@@ -180,5 +164,5 @@ def chatbot(message , history):
    clear_btn="Clear",
)

- if __name__ == "__main__":
-     iface.launch(debug=True)
+ if __name__ == "__main__":
+     iface.launch(server_name="0.0.0.0", server_port=7860)
 
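With server_name="0.0.0.0" the app binds on all interfaces on port 7860, the default port Hugging Face Spaces expects. The pipeline can also be smoke-tested without the UI; a hypothetical check, assuming app.py is importable as a module and that history uses Gradio's list-of-(user, bot)-pairs format:

```python
from app import chatbot

# Empty history simulates the first turn of a chat session.
print(chatbot("What are milestone LLM papers?", history=[]))
```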