Spaces:
Sleeping
Sleeping
File size: 1,248 Bytes
08cf98f e018265 ae5a00c acd25a6 08cf98f b5a49a6 ccd70e5 ae5a00c ccd70e5 1e006af ccd70e5 6131adc 9bed2aa 61c162a ccd70e5 1132e63 3362104 a65ba07 08cf98f ccd70e5 8361090 2b0cb36 61c162a ccd70e5 0ad9fe5 08cf98f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
from openai import OpenAI
import os
# Read the OpenAI API key from the AIJR environment variable.
# NOTE(review): a missing/empty key is not checked here; OpenAI(...) will only
# fail when the first request is made.
api_key = os.getenv("AIJR")
import gradio as gr
client = OpenAI(api_key=api_key)
# Module-level conversation history shared by every call to chatbot().
# NOTE(review): this is shared across all users/sessions of the Gradio app and
# grows without bound; MAX_HISTORY_LENGTH below suggests trimming was intended.
# NOTE(review): "font line" in the system prompt is likely a typo for
# "front line" — confirm before changing (it is a runtime string).
messages = [
{"role": "system", "content": "You are font line pre-sales manager named JR. Answer questions as the manager of systems engineering. Only answer technology related questions."},
]
# Limit the number of tokens
MAX_TOKENS = 150 # Adjust as needed
MAX_HISTORY_LENGTH = 5 # Limit the conversation history
def chatbot(input):
    """Send the user's message to the model and return the assistant's reply.

    Appends the user message and the assistant reply to the shared,
    module-level ``messages`` history, trimming older turns so the history
    keeps the system prompt plus at most the most recent exchanges.

    Args:
        input: The user's question. Falsy input short-circuits without
            calling the API.

    Returns:
        The assistant's reply text, or "" when input is empty.
    """
    if not input:
        # Nothing to send — avoid appending an empty turn to the history.
        return ""
    messages.append({"role": "user", "content": input})
    # Keep the system prompt (index 0) plus at most MAX_HISTORY_LENGTH
    # user/assistant exchanges; the constant existed but was never used.
    del messages[1:-MAX_HISTORY_LENGTH * 2]
    # BUG FIX: in the original, max_tokens/temperature/frequency_penalty were
    # dead statements placed AFTER the completed create(...) call (the first
    # even bound a 1-tuple) — none of them were applied. They are now passed
    # to the API as intended.
    chat = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        max_tokens=MAX_TOKENS,        # limit response length
        temperature=0.7,              # moderately deterministic responses
        frequency_penalty=0.5,        # reduce verbosity / repetition
    )
    reply = chat.choices[0].message.content
    messages.append({"role": "assistant", "content": reply})
    return reply
# Define the Gradio Interface: one textbox in, one textbox out, backed by
# the chatbot() function above.
demo = gr.Interface(
    fn=chatbot,
    inputs="textbox",
    outputs="textbox",
    title="JRG.PT",
    # BUG FIX: corrected user-facing typo "qustion" -> "question".
    description="Ask a question to AI JR, Replacement Director of Systems Engineering",
    theme="compact",
)
# Launch the Gradio Interface with a public share link (share=True).
demo.launch(share=True)
|