dishav2 committed on
Commit: d007479
1 Parent(s): 756c542

Update app.py

Files changed (1)
  1. app.py +262 -90
app.py CHANGED
@@ -1,106 +1,278 @@
 import os
-from typing import Optional, Tuple
 
 import gradio as gr
-from langchain.chains import ConversationChain
-from langchain.llms import OpenAI
-from threading import Lock
-
-
-def load_chain():
-    """Logic for loading the chain you want to use should go here."""
-    llm = OpenAI(temperature=0)
-    chain = ConversationChain(llm=llm)
-    return chain
-
-
-def set_openai_api_key(api_key: str):
-    """Set the api key and return chain.
-
-    If no api_key, then None is returned.
-    """
-    if api_key:
-        os.environ["OPENAI_API_KEY"] = api_key
-        chain = load_chain()
-        os.environ["OPENAI_API_KEY"] = ""
-        return chain
-
-class ChatWrapper:
-
-    def __init__(self):
-        self.lock = Lock()
-    def __call__(
-        self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
-    ):
-        """Execute the chat functionality."""
-        self.lock.acquire()
         try:
-            history = history or []
-            # If chain is None, that is because no API key was provided.
-            if chain is None:
-                history.append((inp, "Please paste your OpenAI key to use"))
-                return history, history
-            # Set OpenAI key
-            import openai
-            openai.api_key = api_key
-            # Run chain and append input.
-            output = chain.run(input=inp)
-            history.append((inp, output))
-        except Exception as e:
-            raise e
         finally:
-            self.lock.release()
-        return history, history
 
-chat = ChatWrapper()
 
-block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
 
-with block:
-    with gr.Row():
-        gr.Markdown("<h3><center>Canvas Discussion Automated Grader</center></h3>")
 
-        openai_api_key_textbox = gr.Textbox(
-            placeholder="Paste your OpenAI API key",
-            show_label=False,
-            lines=1,
-            type="password",
         )
-        canvas_url = gr.Textbox(
-            placeholder="Enter your Discussion URL",
-            show_label=False,
-            lines=1,
-            type="password",
         )
 
-    chatbot = gr.Chatbot()
 
     with gr.Row():
-        message = gr.Textbox(
-            label="Enter your response here!",
-            placeholder="Start typing your response and interact with the chatbot...",
-            lines=1,
-        )
-        submit = gr.Button(value="Submit", variant="secondary").style(container=False)
-
-    gr.HTML("Canvas Discussion Automated Grader")
-
-    state = gr.State()
-    agent_state = gr.State()
-
-    gr.HTML("""<center>
-        <a href="https://huggingface.co/spaces/dishav2/Canvas_Discussion_Automated_Grader?duplicate=true">
-        <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
-        Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a>
-    </center>""")
-
-    submit.click(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
-    message.submit(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
-
-    openai_api_key_textbox.change(
-        set_openai_api_key,
-        inputs=[openai_api_key_textbox],
-        outputs=[agent_state],
     )
 
-block.launch(debug=True)
+import asyncio
+import glob
 import os
+import time
 
 import gradio as gr
+from dotenv import load_dotenv
+from langchain.chat_models import ChatOpenAI
+from langchain.embeddings import OpenAIEmbeddings
+
+from grader import Grader
+from grader_qa import GraderQA
+from ingest import ingest_canvas_discussions
+from utils import reset_folder
+
+load_dotenv()
+
+pickle_file = "vector_stores/canvas-discussions.pkl"
+index_file = "vector_stores/canvas-discussions.index"
+
+grading_model = 'gpt-4'
+qa_model = 'gpt-4'
+
+llm = ChatOpenAI(model_name=qa_model, temperature=0, verbose=True)
+embeddings = OpenAIEmbeddings(model='text-embedding-ada-002')
+
+grader = None
+grader_qa = None
+
+
+def add_text(history, text):
+    print("Question asked: " + text)
+    response = run_model(text)
+    history = history + [(text, response)]
+    print(history)
+    return history, ""
+
+
+def run_model(text):
+    global grader, grader_qa
+    start_time = time.time()
+    print("start time:" + str(start_time))
+    response = grader_qa.chain(text)
+    sources = []
+    for document in response['source_documents']:
+        sources.append(str(document.metadata))
+
+    source = ','.join(set(sources))
+    response = response['answer'] + '\nSources: ' + str(len(sources))
+    end_time = time.time()
+    # # If response contains string `SOURCES:`, then add a \n before `SOURCES`
+    # if "SOURCES:" in response:
+    #     response = response.replace("SOURCES:", "\nSOURCES:")
+    response = response + "\n\n" + "Time taken: " + str(end_time - start_time)
+    print(response)
+    print(sources)
+    print("Time taken: " + str(end_time - start_time))
+    return response
+
+
+def set_model(history):
+    history = get_first_message(history)
+    return history
+
+def ingest(url, canvas_api_key, history):
+    global grader, llm, embeddings
+    text = f"Downloaded discussion data from {url} to start grading"
+    ingest_canvas_discussions(url, canvas_api_key)
+    grader = Grader(grading_model)
+    response = "Ingested canvas data successfully"
+    history = history + [(text, response)]
+    return history
+
+
+def start_grading(history):
+    global grader, grader_qa
+    text = f"Start grading discussions from {url}"
+    if grader:
+        # if grader.llm.model_name != grading_model:
+        #     grader = Grader(grading_model)
+        # Create a new event loop
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
         try:
+            # Use the event loop to run the async function
+            loop.run_until_complete(grader.run_chain())
+            grader_qa = GraderQA(grader, embeddings)
+            response = "Grading done"
         finally:
+            # Close the loop after use
+            loop.close()
+    else:
+        response = "Please ingest data before grading"
+    history = history + [(text, response)]
+    return history
 
 
+def start_downloading():
+    files = glob.glob("output/*.csv")
+    if files:
+        file = files[0]
+        return gr.outputs.File(file)
+    else:
+        return "File not found"
 
 
+def get_first_message(history):
+    global grader_qa
+    history = [(None,
+                'Get feedback on your canvas discussions. Add your discussion url and get your discussions graded in instantly.')]
+    return get_grading_status(history)
+
+
+def get_grading_status(history):
+    global grader, grader_qa
+    # Check if grading is complete
+    if os.path.isdir('output') and len(glob.glob("output/*.csv")) > 0 and len(glob.glob("docs/*.json")) > 0 and len(
+            glob.glob("docs/*.html")) > 0:
+        if not grader:
+            grader = Grader(qa_model)
+            grader_qa = GraderQA(grader, embeddings)
+        elif not grader_qa:
+            grader_qa = GraderQA(grader, embeddings)
+        if len(history) == 1:
+            history = history + [(None, 'Grading is already complete. You can now ask questions')]
+        # enable_fields(False, False, False, False, True, True, True)
+    # Check if data is ingested
+    elif len(glob.glob("docs/*.json")) > 0 and len(glob.glob("docs/*.html")):
+        if not grader_qa:
+            grader = Grader(qa_model)
+        if len(history) == 1:
+            history = history + [(None, 'Canvas data is already ingested. You can grade discussions now')]
+        # enable_fields(False, False, False, True, True, False, False)
+    else:
+        history = history + [(None, 'Please ingest data and start grading')]
+        # enable_fields(True, True, True, True, True, False, False)
+    return history
+
+
+# handle enable/disable of fields
+def enable_fields(url_status, canvas_api_key_status, submit_status, grade_status,
+                  download_status, chatbot_txt_status, chatbot_btn_status):
+    url.update(interactive=url_status)
+    canvas_api_key.update(interactive=canvas_api_key_status)
+    submit.update(interactive=submit_status)
+    grade.update(interactive=grade_status)
+    download.update(interactive=download_status)
+    txt.update(interactive=chatbot_txt_status)
+    ask.update(interactive=chatbot_btn_status)
+
+    if not chatbot_txt_status:
+        txt.update(placeholder="Please grade discussions first")
+    else:
+        txt.update(placeholder="Ask a question")
+    if not url_status:
+        url.update(placeholder="Data already ingested")
+    if not canvas_api_key_status:
+        canvas_api_key.update(placeholder="Data already ingested")
+    return url, canvas_api_key, submit, grade, download, txt, ask
+
+
+def reset_data(history):
+    # Use shutil.rmtree() to delete output, docs, and vector_stores folders, reset grader and grader_qa, and get_grading_status, reset and return history
+    global grader, grader_qa
+    reset_folder('output')
+    reset_folder('docs')
+    reset_folder('vector_stores')
+    grader = None
+    grader_qa = None
+    history = [(None, 'Data reset successfully')]
+    return history
+
+
+def bot(history):
+    return get_grading_status(history)
+
+
+with gr.Blocks() as demo:
+    gr.Markdown(f"<h2><center>{'Canvas Discussion Grading With Feedback'}</center></h2>")
+
+    with gr.Row():
+        url = gr.Textbox(
+            label="Canvas Discussion URL",
+            placeholder="Enter your Canvas Discussion URL"
         )
+
+        canvas_api_key = gr.Textbox(
+            label="Canvas API Key",
+            placeholder="Enter your Canvas API Key", type="password"
         )
 
+    with gr.Row():
+        submit = gr.Button(value="Submit", variant="secondary", )
+        grade = gr.Button(value="Grade", variant="secondary")
+        download = gr.Button(value="Download", variant="secondary")
+        reset = gr.Button(value="Reset", variant="secondary")
+
+    chatbot = gr.Chatbot([], label="Chat with grading results", elem_id="chatbot", height=400)
 
     with gr.Row():
+        with gr.Column(scale=3):
+            txt = gr.Textbox(
+                label="Ask questions about how students did on the discussion",
+                placeholder="Enter text and press enter, or upload an image", lines=1
+            )
+        ask = gr.Button(value="Ask", variant="secondary", scale=1)
+
+    chatbot.value = get_first_message([])
+    submit.click(ingest, inputs=[url, canvas_api_key, chatbot], outputs=[chatbot],
+                 postprocess=False).then(
+        bot, chatbot, chatbot
+    )
+
+    grade.click(start_grading, inputs=[chatbot], outputs=[chatbot],
+                postprocess=False).then(
+        bot, chatbot, chatbot
+    )
+
+    download.click(start_downloading, inputs=[], outputs=[chatbot], postprocess=False).then(
+        bot, chatbot, chatbot
     )
 
+    txt.submit(add_text, [chatbot, txt], [chatbot, txt], postprocess=False).then(
+        bot, chatbot, chatbot
+    )
+
+    ask.click(add_text, inputs=[chatbot, txt], outputs=[chatbot, txt], postprocess=False, ).then(
+        bot, chatbot, chatbot
+    )
+
+    reset.click(reset_data, inputs=[chatbot], outputs=[chatbot], postprocess=False, show_progress=True, ).success(
+        bot, chatbot, chatbot)
+
+if __name__ == "__main__":
+    demo.queue()
+    demo.queue(concurrency_count=5)
+    demo.launch(debug=True, )
+
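For context, here is a minimal, self-contained sketch of the chained-event pattern the new app.py relies on, where each button's .click(...) handler is followed by a .then(...) status refresh, as in the submit/grade/download wiring above. It assumes a Gradio 3.x environment like the Space's; the names do_work, refresh_status, run, and chat below are illustrative stand-ins, not identifiers from this commit.

import gradio as gr


def do_work(history):
    # Stand-in for a long-running handler such as ingesting or grading.
    return history + [("Run", "Work finished")]


def refresh_status(history):
    # Stand-in for the status check chained after every event (like bot() above).
    return history + [(None, "Status refreshed")]


with gr.Blocks() as demo:
    chat = gr.Chatbot([], label="Status")
    run = gr.Button("Run")

    # Run the main handler first, then refresh the status once it returns,
    # mirroring the submit/grade/download wiring in the diff above.
    run.click(do_work, inputs=[chat], outputs=[chat]).then(
        refresh_status, inputs=[chat], outputs=[chat]
    )

if __name__ == "__main__":
    demo.queue()
    demo.launch()

The same idea extends to .success(...), used for the reset button above, which only runs the follow-up step when the first handler finishes without raising an error.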