# NOTE: the lines above this file's code were Hugging Face Spaces page
# residue captured with the source (Space status at capture time:
# "Runtime error"); kept here as a comment so the file parses.
"""Virtual TA: a Gradio chatbot backed by a llama-index knowledge store
and the OpenAI API, intended to run as a Hugging Face Space."""

import json
import os

import gradio as gr
import openai
from llama_index.core import StorageContext, load_index_from_storage
from openai import OpenAI

# The OpenAI key is read from the environment (an HF Space secret) at
# request time in create_bot(); never hard-code it here.

# Rebuild the storage context and load the prebuilt knowledge index
# persisted alongside this file ('./' is the Space's working directory).
storage_context = StorageContext.from_defaults(persist_dir='./')
index = load_index_from_storage(storage_context)
class Chatbot:
    """Thin wrapper that answers questions from a llama-index index.

    The constructor stores the index and installs the OpenAI API key
    globally on the ``openai`` module (legacy-style configuration).
    """

    def __init__(self, api_key, index):
        self.index = index
        # Legacy global key configuration; the module-level OpenAI()
        # client used for moderation reads the env var independently.
        openai.api_key = api_key

    def generate_response(self, user_input):
        """Query the knowledge index and return an assistant message.

        Args:
            user_input: The user's question as plain text.

        Returns:
            A chat-style dict: {"role": "assistant", "content": <answer>}.
        """
        # Bug fix: query the instance's own index (self.index); the
        # original read the module-level global `index`, silently
        # ignoring the index passed to the constructor.
        query_engine = self.index.as_query_engine()
        response = query_engine.query(user_input)
        return {"role": "assistant", "content": response.response}
def create_bot(input):
    """Gradio handler: moderate the question, then answer it from the index.

    Args:
        input: The user's question text from the Gradio textbox.

    Returns:
        The assistant's answer text; "Invalid request." when the input is
        flagged by the moderation endpoint; a prompt to type a question
        when the input is empty.
    """
    # Security fix: the original passed a literal "sk-proj-..." secret as
    # the *name* to os.getenv (leaking the key in source AND always
    # getting None back). Read the key by its env-var name instead; set
    # it as a secret in the Space settings.
    bot = Chatbot(os.getenv("OPENAI_API_KEY"), index=index)

    if not input:
        # Guard: the original left `output` unbound for empty input,
        # raising UnboundLocalError on return.
        return "Please enter a question."

    # Screen the input with the moderations endpoint before answering.
    client = OpenAI()
    response_mod = client.moderations.create(input=input)
    response_dict = response_mod.model_dump()
    flagged = response_dict['results'][0]['flagged']
    if flagged:
        return "Invalid request."

    response_bot = bot.generate_response(input)
    return response_bot['content']
# --- Gradio UI wiring ---
inputs = gr.Textbox(lines=7, label="Ask questions related to the course.")
outputs = gr.Textbox(label="Reply")

iface = gr.Interface(
    fn=create_bot,
    inputs=inputs,
    outputs=outputs,
    title="Virtual TA",
    description="This is a prototype of learning assistant designed for marketing courses. Powered by ChatGPT 3.5.",
    theme="compact",
)
iface.launch(inline=True)