# CRMBot / app.py
from openai import OpenAI
import os
import gradio as gr
import json
# Build the client with the API key read from the environment.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
# Shared chat history as a list of {"role", "content"} messages.
conversation_history = []
def predict(input_str):
    # Send a single user message to the model and return the reply text.
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": input_str}],
    )
    response = completion.choices[0].message.content
    return response
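# Note: predict() is a stateless helper and is not wired into the Gradio UI below.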
def initial_prompt():
    # Load the CRM record templates and seed the conversation with the system prompt.
    with open('templates/record_types.json') as f:
        crm_template_json = json.load(f)
    global conversation_history
    default_message = (f"""
    I want you to act as an assistant that helps create records for Microsoft Dynamics CRM.
    Your job is solely to help with creating and managing CRM records, so if the user wants to
    talk about anything other than CRM, you politely remind them of your role and state that you
    cannot perform the requested action. When the user contacts you, introduce yourself as the
    CRM Assistant Bot and ask how you can help.
    Depending on the action the user wants to take, you will ask for the input values the user
    needs to provide, based on the 'Contact' or 'Aktivite' templates in this JSON data: {crm_template_json}.
    When the user contacts you, learn their goal, then ask for the field values needed for the
    intended record one by one, step by step; when the steps are finished, return this information
    as JSON. Until the user confirms that you should create the record, keep asking whether they
    want to add any more information. Under no circumstances fill in values that the user is
    supposed to provide yourself.
    If the user wants to add extra information, ask for those details in order and add them to the
    record you are building.
    Example: [User: "Hello", Assistant: "Hello! How can I help you?",
    User: "I want to create a contact record", ...]. Let's start with the first record.
    """)
    conversation_history.append({"role": "system", "content": default_message})
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=conversation_history,
        temperature=0.3,
        max_tokens=7500,
    )
    message = completion.choices[0].message.content
    print(message)
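# Build the system prompt from the CRM templates once at startup.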
initial_prompt()
def handle_input(input_str: str):
    # Append the user message, trim the history if it grows too long,
    # and return the assistant's reply.
    global conversation_history
    if len(conversation_history) >= 20:
        # Keep the system prompt plus the ten most recent messages.
        conversation_history = conversation_history[:1] + conversation_history[-10:]
    conversation_history.append({"role": "user", "content": input_str})
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=conversation_history,
        # temperature=temperature,
        # max_tokens=max_tokens,
        # presence_penalty=presence_penalty,
        # frequency_penalty=frequency_penalty,
        # top_p=top_p_input,
        # stream=stream_input,
    )
    message = completion.choices[0].message.content
    conversation_history.append({"role": "assistant", "content": message})
    # if "Müşteri" in message:
    #     message = get_response_again(" ".join(str(item) for item in conversation_history))
    return message

def get_response_again(content):
    # Currently unused: re-queries the model until the reply no longer contains "Müşteri".
    while True:
        completion = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "assistant", "content": content}],
            # temperature=temperature,
            # max_tokens=max_tokens,
            # presence_penalty=presence_penalty,
            # frequency_penalty=frequency_penalty,
            # top_p=top_p_input,
            # stream=stream_input,
        )
        message = completion.choices[0].message.content
        if "Müşteri" not in message:
            break
    return message
def get_model_reply(query, context=[]):
    # Gradio callback: append the user query and the model reply to the chat context,
    # then return (user, bot) pairs for the Chatbot component.
    context += [query]
    # client.api_key = api_key
    response = handle_input(query)
    context += [response]
    responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
    return responses, context
# TODO: This feature will be added later.
def speech_2_text(audio, api_key, context=[]):
    # Transcribe the recorded audio with Whisper, then answer the transcript with GPT-4.
    local_client = OpenAI(api_key=api_key)  # client built from the key entered in the UI
    with open(audio, "rb") as audio_file:
        transcript = local_client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    prompt = transcript.text
    context += [prompt]
    completion = local_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=2500,
    )
    response = completion.choices[0].message.content
    context += [response]
    responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
    return responses, context
# Disabled helper: downloads audio from a YouTube URL with yt_dlp and transcribes it.
# Left fully commented out because yt_dlp is not imported and the Voice Chat tab is disabled.
# def get_audio(url, api_key, context=[]):
#     local_client = OpenAI(api_key=api_key)
#     destination = "./audio/test"
#     try:
#         ydl_opts = {
#             'format': 'bestaudio/best',
#             'outtmpl': destination,
#             'postprocessors': [{
#                 'key': 'FFmpegExtractAudio',
#                 'preferredcodec': 'mp3',
#                 'preferredquality': '192',
#             }],
#         }
#         with yt_dlp.YoutubeDL(ydl_opts) as ydl:
#             ydl.download([url])
#         with open(f'{destination}.mp3', "rb") as audio_file:
#             transcript = local_client.audio.transcriptions.create(model="whisper-1", file=audio_file)
#         context += [transcript.text]
#         responses = [(u, b) for u, b in zip(context[::2], context[1::2])]
#         return responses, context
#     except Exception as e:
#         print("Connection Error", e)
with gr.Blocks(css="#chatbot {overflow-y:auto; height:400px;}") as dialog_app:
with gr.Tab("ChatBot"):
with gr.Row():
# with gr.Column(scale=1, min_width=600):
# api_key = gr.Textbox(label="Your API Key", type="password")
# temperature_input = gr.Slider(minimum=0, maximum=1.0, default=0.5, step=0.01, label="Temperature")
# max_tokens_input = gr.inputs.Slider(minimum=1, maximum=2048, default=50, step=10, label="Max Tokens")
# top_p_input = gr.inputs.Slider(minimum=0.1, maximum=1.0, default=0.5, step=0.01, label="Top P")
# presence_penalty_input = gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.0, step=0.1, label="Presence Penalty")
# frequency_penalty_input = gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.0, step=0.1, label="Frequency Penalty")
#stream_input = gr.inputs.Checkbox(label="Stream")
with gr.Column(scale=2, min_width=600):
chatbot = gr.Chatbot(elem_id="chatbot")
state = gr.State([])
txt = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter",
container=False
)
# txt.submit(get_model_reply, [txt, api_key, temperature_input, max_tokens_input,top_p_input, presence_penalty_input,frequency_penalty_input, state], [chatbot, state])
txt.submit(get_model_reply, [txt, state], [chatbot, state])
txt.submit(lambda :"", None, txt)
# with gr.Tab("Voice Chat"):
# with gr.Row():
# with gr.Column(scale=1, min_width=600):
# voice_api_key = gr.Textbox(label="Your API Key", type="password")
# voice_state = gr.State([])
# youtube_url = gr.Textbox(
# show_label=False,
# type="text",
# placeholder="Enter an Youtube URL")
# mic_audio = gr.Audio(source="microphone", type="filepath")
# with gr.Row():
# with gr.Column(scale=1, min_width=250):
# audio_clean_btn = gr.Button(value='Clean')
# with gr.Column(scale=2, min_width=250):
# audio_sbmt_btn = gr.Button(value='Submit', variant='primary')
# with gr.Column(scale=2, min_width=600):
# voice_bot = gr.Chatbot(elem_id="chatbot")
# #txt_box = gr.Textbox(type="text")
# voice_state = gr.State([])
# txt = gr.Textbox(
# show_label=False,
# placeholder="Enter text and press enter"
# ).style(container=False)
# #youtube_url.submit(get_audio, [youtube_url,voice_api_key, state], [voice_bot, state])
# #audio_clean_btn.click(clean_audio, )
# audio_sbmt_btn.click(speech_2_text, inputs=[mic_audio,voice_api_key,voice_state], outputs=[voice_bot, voice_state])
# txt.submit(get_model_reply, [txt, voice_api_key, temperature_input, max_tokens_input,top_p_input, presence_penalty_input,frequency_penalty_input, voice_state], [voice_bot, voice_state])
# txt.submit(lambda :"", None, txt)
dialog_app.launch()
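# launch() uses Gradio defaults; options such as share=True or server_name can be passed here if needed.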