aakash0563 committed

Commit 06aad00
1 Parent(s): bf852d8

Update app.py

Files changed (1)
  1. app.py +43 -56
app.py CHANGED
@@ -4,42 +4,10 @@ import gradio as gr
 import os
 import google.generativeai as genai
 GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
-
-
 import chromadb
 from langchain.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from uuid import uuid4
-import gradio as gr
-
-# Now you can use hugging_face_api_key in your code
-
-genai.configure(api_key=GOOGLE_API_KEY)
-model = genai.GenerativeModel('gemini-pro') # Load the model
-
-def get_Answer(query):
-    res = collection.query( # Assuming `collection` is defined elsewhere
-        query_texts=query,
-        n_results=2
-    )
-    system = f"""You are a teacher. You will be provided some context,
-    your task is to analyze the relevant context and answer the below question:
-    - {query}
-    """
-    context = " ".join([re.sub(r'[^\x00-\x7F]+', ' ', r) for r in res['documents'][0]])
-    prompt = f"### System: {system} \n\n ###: User: {context} \n\n ### Assistant:\n"
-    answer = model.generate_content(prompt).text
-    return answer
-
-# # Define the Gradio interface
-# iface = gr.Interface(
-#     fn=get_Answer,
-#     inputs=gr.Textbox(lines=5, placeholder="Ask a question"), # Textbox for query
-#     outputs="textbox", # Display the generated answer in a textbox
-#     title="Answer Questions with Gemini-Pro",
-#     description="Ask a question and get an answer based on context from a ChromaDB collection.",
-# )
-



@@ -67,51 +35,70 @@ def upload_pdf(file_path):
     )
     return f"PDF Uploaded Successfully. {collection.count()} chunks stored in ChromaDB"

-# # Define the Gradio interface
-# iface = gr.Interface(
-#     fn=upload_pdf,
-#     inputs=["file"], # Specify a file input component
-#     outputs="textbox", # Display the output text in a textbox
-#     title="Upload PDF to ChromaDB",
-#     description="Upload a PDF file and store its text chunks in ChromaDB.",
-# )
+# Now you can use hugging_face_api_key in your code
+
+genai.configure(api_key=GOOGLE_API_KEY)
+model = genai.GenerativeModel('gemini-pro') # Load the model

-# Gradio interfaces
+def get_Answer(query):
+    res = collection.query( # Assuming `collection` is defined elsewhere
+        query_texts=query,
+        n_results=2
+    )
+    system = f"""You are a teacher. You will be provided some context,
+    your task is to analyze the relevant context and answer the below question:
+    - {query}
+    """
+    context = " ".join([re.sub(r'[^\x00-\x7F]+', ' ', r) for r in res['documents'][0]])
+    prompt = f"### System: {system} \n\n ###: User: {context} \n\n ### Assistant:\n"
+    answer = model.generate_content(prompt).text
+    return answer
+
+# Define the Gradio interface
 iface1 = gr.Interface(
     fn=get_Answer,
-    inputs=gr.Textbox(lines=5, placeholder="Ask a question"),
-    outputs="textbox",
+    inputs=gr.Textbox(lines=5, placeholder="Ask a question"), # Textbox for query
+    outputs="textbox", # Display the generated answer in a textbox
     title="Answer Questions with Gemini-Pro",
     description="Ask a question and get an answer based on context from a ChromaDB collection.",
 )
+
+
+
+# Define the Gradio interface
 iface2 = gr.Interface(
     fn=upload_pdf,
-    inputs=["file"],
-    outputs="textbox",
+    inputs=["file"], # Specify a file input component
+    outputs="textbox", # Display the output text in a textbox
     title="Upload PDF to ChromaDB",
     description="Upload a PDF file and store its text chunks in ChromaDB.",
 )



-# thread1 = threading.Thread(target=iface1.launch, args=(debug=True, share=True, server_port=7861))
-# thread2 = threading.Thread(target=iface2.launch, args=(debug=True, share=True, server_port=7862))
+def check_boxes(checkbox_values):
+    if checkbox_values == "iface1":
+        return iface1.launch(debug=True, share=True)
+    else:
+        return iface1.launch(debug=True, share=True)

-debug = True # Define the variables
-share = True
+checkboxes = [
+    gr.Checkbox(label="iface1"),
+    gr.Checkbox(label="iface2"),
+]

-thread1 = threading.Thread(
-    target=iface1.launch, args=(debug, share, server_port:=8080) # Pass the variables
-)
-thread2 = threading.Thread(
-    target=iface2.launch, args=(debug, share, server_port:=8081) # Pass the variables
+iface = gr.Interface(
+    check_boxes,
+    inputs=checkboxes,
+    outputs="text",
+    title="Checkbox Demo",
+    description="Select one or both checkboxes.",
 )

+iface.launch(debug=True, share=True)



-thread1.start()
-thread2.start()



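Note: the relocated get_Answer block still depends on two things that are not part of this diff: the re module it uses to strip non-ASCII characters, and the ChromaDB collection it queries (the inline comment assumes collection is defined elsewhere in app.py). A minimal sketch of that assumed setup, with a hypothetical collection name, would look like:

import re          # needed for re.sub(...) inside get_Answer
import chromadb

# Hypothetical setup, not taken from this commit: an in-memory Chroma client
# and a collection that upload_pdf would populate and get_Answer would query.
chroma_client = chromadb.Client()
collection = chroma_client.get_or_create_collection(name="pdf_chunks")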
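Also worth noting: the new check_boxes helper returns iface1.launch(...) in both branches and launches one Gradio app from inside another. If the goal is simply to expose both tools in a single app, a common alternative (not what this commit does) is to mount the two interfaces as tabs:

# Alternative sketch, not part of this commit: serve both interfaces as tabs
# of one Gradio app instead of selecting them via checkboxes.
demo = gr.TabbedInterface(
    [iface1, iface2],
    tab_names=["Ask Gemini-Pro", "Upload PDF"],
)
demo.launch(debug=True, share=True)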