import openai
import gradio as gr
import json
openai.api_key = "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"
def save_conversation():
    # Persist the running message history so the chat survives restarts
    with open('conversation.json', 'w') as f:
        json.dump(messages, f)

def load_conversation():
    # Reload any previously saved history; start fresh if none exists
    try:
        with open('conversation.json', 'r') as f:
            return json.load(f)
    except FileNotFoundError:
        return []
messages = load_conversation()
if not messages:
    messages.append({"role": "system", "content": "You are a knowledgeable assistant specialized in recruiting and hiring, and familiar with ADP Workforce Now Recruitment and various hiring and CRM tools."})
def CustomChatGPT(user_input):
    messages.append({"role": "user", "content": user_input})
    # Send only the most recent messages as a rough guard against exceeding the
    # model's context window; note this trims by message count, not by tokens
    # (a token-aware sketch follows below)
    conversation = messages[-4096:]
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation,
            max_tokens=1000,
            temperature=0.7)
    except openai.error.OpenAIError as e:
        print(f"Received error from OpenAI: {e}")
        return "I'm sorry, but I'm unable to generate a response at this time."
    ChatGPT_reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": ChatGPT_reply})
    save_conversation()
    return ChatGPT_reply
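
# Optional sketch, not wired into CustomChatGPT above: the slice on `messages`
# trims history by message count rather than by tokens. A minimal helper that
# actually counts tokens could use the tiktoken package (an assumption; it is
# not a dependency of this script) and drop the oldest non-system turns until
# the history fits a chosen budget.
import tiktoken

def truncate_to_token_budget(history, budget=3000, model="gpt-3.5-turbo"):
    enc = tiktoken.encoding_for_model(model)
    def message_tokens(msg):
        return len(enc.encode(msg["content"]))
    trimmed = list(history)
    # Keep the system prompt at index 0; drop the oldest user/assistant turns
    # until the rough total fits the budget.
    while len(trimmed) > 1 and sum(message_tokens(m) for m in trimmed) > budget:
        trimmed.pop(1)
    return trimmed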
interface = gr.Interface(fn=CustomChatGPT,
                         inputs="textbox",
                         outputs="textbox",
                         title="HR HELPER",
                         description="Chat with a specialized assistant that can answer questions about recruiting, hiring, and various HR and CRM tools. Developed by A. Leschik.")
interface.launch()