Spaces:
Running
Running
File size: 1,815 Bytes
47d89c5 b5ae972 47d89c5 0c204c8 47d89c5 789d6bf a80eae4 0aa4549 47d89c5 0aa4549 1a65ea9 9ab9c0e a80eae4 47d89c5 0aa4549 64da101 47d89c5 64da101 a99d103 64da101 0aa4549 47d89c5 2bbbe1b 47d89c5 ed65bd3 9e5cd5f 47d89c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
import openai
import json
import gradio as gr
import os
from openai import OpenAI
#from llama_index import set_global_service_context
#set_global_service_context(service_context)
# retrieve secret key from HF settings
#openai.api_key = os.getenv("OPENAI_API_KEY")
# rebuild storage context and load knowledge index
from llama_index.core import StorageContext, load_index_from_storage
# Rebuild the llama_index storage context from the app's working directory
# and load the pre-built knowledge index persisted there. This runs at import
# time, so the persisted index files must ship alongside this script.
# NOTE(review): persist_dir='./' assumes the index artifacts (docstore.json,
# index_store.json, ...) live in the current directory — confirm at deploy.
storage_context = StorageContext.from_defaults(persist_dir='./')
index = load_index_from_storage(storage_context)
class Chatbot:
    """Thin wrapper around a llama_index index that answers questions via
    its query engine and returns OpenAI-chat-shaped assistant messages."""

    def __init__(self, api_key, index):
        """Store the knowledge index and configure the OpenAI API key.

        api_key -- OpenAI secret key (set globally on the openai module)
        index   -- a llama_index index exposing as_query_engine()
        """
        self.index = index
        openai.api_key = api_key

    def generate_response(self, user_input):
        """Answer *user_input* from the stored index.

        Returns a chat-message dict: {"role": "assistant", "content": text}.
        """
        # Bug fix: the original read the module-level global `index`,
        # silently ignoring the index passed to __init__. Use the
        # instance's own copy so the constructor argument matters.
        query_engine = self.index.as_query_engine()
        response = query_engine.query(user_input)
        message = {"role": "assistant", "content": response.response}
        return message
def create_bot(input):
    """Gradio handler: moderate the user's question, then answer it from
    the course knowledge index.

    input -- raw text from the Gradio question textbox (may be empty)
    Returns the reply string to display in the output textbox.
    """
    # Bug fix (guard clause): the original left `output` unassigned when
    # `input` was falsy, so `return output` raised UnboundLocalError.
    if not input:
        return "Please enter a question."

    bot = Chatbot(os.getenv("OPENAI_API_KEY"), index=index)

    # Screen the question with OpenAI's moderation endpoint before
    # forwarding it to the index-backed chatbot.
    client = OpenAI()
    response_mod = client.moderations.create(input=input)
    response_dict = response_mod.model_dump()
    flagged = response_dict['results'][0]['flagged']

    if flagged:
        return "Invalid request."
    return bot.generate_response(input)['content']
# Wire up and launch the Gradio UI: one multi-line question box feeding
# create_bot, with the reply shown in a second textbox.
question_box = gr.Textbox(lines=7, label="Ask questions related to the course.")
reply_box = gr.Textbox(label="Reply")
iface = gr.Interface(
    fn=create_bot,
    inputs=question_box,
    outputs=reply_box,
    title="Virtual TA",
    description="This is a prototype of learning assistant designed for online course. Powered by ChatGPT.",
    theme="compact",
)
iface.launch(inline=True)
|