import os

import requests
from fastapi import Body, FastAPI
from openai import OpenAI
import gradio as gr

# Path at which the Gradio chat UI is mounted onto the FastAPI app.
CUSTOM_PATH = "/"

# NOTE(review): "OPEN_API_KEY" looks like a typo for the conventional
# "OPENAI_API_KEY", but it is kept as-is so existing deployments that
# export OPEN_API_KEY keep working — confirm before renaming.
client = OpenAI(api_key=os.environ['OPEN_API_KEY'])

# FastAPI application; the Gradio chat UI is mounted onto it at the end
# of this module.
app = FastAPI()

# System prompt per supported context; any other context falls back to
# the generic assistant persona.
_SYSTEM_PROMPTS = {
    "Grammar Checker": (
        "You are a grammar expert. Grammar check and suggest with the "
        "corrected paragraph. Also calculate a confidence score for the "
        "generated content considering the punctuations, correctness, "
        "clarity, engagement and delivery"
    ),
    "Gift Recommender": (
        "You are a Gift Recommender. Suggest Gifts based on the age and gender."
    ),
}
_DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant."


@app.post("/chat")
async def chat(body: dict = Body(...)):
    """Proxy one chat turn to the OpenAI Chat Completions API.

    Expects a JSON body with:
      * ``context`` -- "Grammar Checker", "Gift Recommender", or anything
        else (which falls back to a generic assistant persona), and
      * ``prompt``  -- the user's message.

    Returns the raw ChatCompletion response object (FastAPI serializes it).
    A missing "context" or "prompt" key raises KeyError (-> HTTP 500),
    matching the original behavior.
    """
    content = _SYSTEM_PROMPTS.get(body["context"], _DEFAULT_SYSTEM_PROMPT)
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": content + " The output should be in more natural tone."},
            {"role": "user", "content": body['prompt']},
        ],
    )
    return response


def converse(inp, history, context):
    """Gradio chat callback: forward the message to the local /chat endpoint.

    Parameters:
      inp     -- the user's message text.
      history -- chat history supplied by gr.ChatInterface (unused here).
      context -- current value of the context dropdown, forwarded as-is.

    Returns the assistant's reply text extracted from the API response.
    """
    response = requests.post(
        "http://localhost:8000/chat",
        json={"prompt": inp, "context": context},
    )
    return response.json()['choices'][0]['message']['content']


# Title and description shown above the chat UI.
title = "Welcome to the Chatbot Interface!"
description = "Feel free to start a conversation with the chatbot."
# Context selector shown alongside the chat box; its current value is
# passed to `converse` as the third argument via `additional_inputs`.
dropdown = gr.Dropdown(
    ["Assistant", "Grammar Checker", "Gift Recommender"],
    label="Context",
    value="Assistant",
)

# Chat UI wiring: gr.ChatInterface calls `converse(message, history, context)`.
chatapp = gr.ChatInterface(
    fn=converse,
    title=title,
    description=description,
    additional_inputs=[dropdown],
)

# Mount the Gradio app onto the FastAPI app at CUSTOM_PATH.
app = gr.mount_gradio_app(app, chatapp, path=CUSTOM_PATH)

# Run from the terminal as you would normally start a FastAPI app:
#   uvicorn run:app
# then navigate to http://localhost:8000/ in your browser (the UI is
# mounted at CUSTOM_PATH, which is "/", not "/gradio").