import openai
import gradio as gr
import time
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_completion(prompt, model="gpt-3.5-turbo"):
    # Single-turn helper: wrap the prompt as one user message and return the model's reply.
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,  # degree of randomness of the model's output
    )
    return response.choices[0].message["content"]
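
# Usage sketch for get_completion(); the prompt text below is illustrative only,
# not part of the original project:
#   answer = get_completion("Summarize this self-introduction in three sentences: ...")
#   print(answer)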

def get_completion_from_messages(user_input, model="gpt-3.5-turbo", temperature=0.8):
    # System prompt (in Korean): "You are an interviewer who asks questions based on the
    # applicant's self-introduction letter. If it contains technical jargon, ask
    # follow-up questions about it."
    messages = [
        {"role": "system", "content": "너는 자기소개서에 기반하여 질문을 하는 면접관이야. "
                                       "만약 전문용어가 있다면 꼬리질문해줘"},
        {"role": "user", "content": user_input},
    ]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,  # degree of randomness of the model's output
    )
    return response.choices[0].message["content"]
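
# Usage sketch for get_completion_from_messages(); the resume excerpt below is a
# hypothetical placeholder, not part of the original project:
#   question = get_completion_from_messages(
#       "I built a sentiment-analysis service and deployed it with Docker and FastAPI.",
#       temperature=0.8,
#   )
#   print(question)  # the model replies as an interviewer with follow-up questions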

####
# Flow: take the user's input and pass it through get_completion_from_messages to get a reply.
# The system/user role split and the prompt engineering happen at that step.
####

class Interviewer:
    def __init__(self):
        # Initialize the interviewer with an empty conversation history
        self.history = []

    def predict(self, user_input):
        # Forward the Gradio text input to the interviewer prompt and return the reply.
        response = get_completion_from_messages(user_input, temperature=0.8)
        return response
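
# Note: self.history is initialized but never sent to the API, so each predict() call is a
# stateless, single-turn request; earlier questions and answers are not carried over.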
inter = Interviewer()
# UI title (Korean): "Self-introduction-based interview simulation chatbot".
title = "자소서기반 면접 시뮬레이션 chat bot (this template is based on Tonic's MistralMed Chat)"
chatbot = gr.Interface(
    fn=inter.predict,
    title=title,
    inputs="text",
    outputs="text",
)
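
# launch() starts a local Gradio server; passing share=True would additionally expose a
# temporary public link (optional, not used here).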
chatbot.launch()